Example usage for com.mongodb BulkWriteException getWriteErrors

List of usage examples for com.mongodb BulkWriteException getWriteErrors

Introduction

On this page you can find example usages of com.mongodb BulkWriteException getWriteErrors.

Prototype

public List<BulkWriteError> getWriteErrors() 

Document

Returns the list of errors, which will not be null but may be empty (for example, when the failure is due only to a write concern error).
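
As a minimal, self-contained sketch of how getWriteErrors() is typically consumed, the snippet below forces a duplicate key error through the legacy bulk API and prints each per-write error; the collection name, the documents, and the console output are illustrative assumptions rather than part of the project examples quoted below.

import com.mongodb.BasicDBObject;
import com.mongodb.BulkWriteError;
import com.mongodb.BulkWriteException;
import com.mongodb.BulkWriteOperation;
import com.mongodb.BulkWriteResult;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;

public class GetWriteErrorsExample {
    public static void main(String[] args) {
        MongoClient client = new MongoClient(); // assumes a mongod reachable on localhost:27017
        try {
            DBCollection collection = client.getDB("test").getCollection("items");

            // two documents with the same _id, so the second insert fails with a duplicate key error
            BulkWriteOperation bulk = collection.initializeUnorderedBulkOperation();
            bulk.insert(new BasicDBObject("_id", 1));
            bulk.insert(new BasicDBObject("_id", 1));

            try {
                BulkWriteResult result = bulk.execute();
                System.out.println("all writes succeeded: " + result);
            } catch (BulkWriteException bwe) {
                // getWriteErrors() is never null, but it may be empty
                for (BulkWriteError error : bwe.getWriteErrors()) {
                    System.out.printf("failed write at index %d: code=%d, message=%s%n",
                            error.getIndex(), error.getCode(), error.getMessage());
                }
            }
        } finally {
            client.close();
        }
    }
}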

Usage

From source file: com.everydots.kafka.connect.mongodb.MongoDbSinkTask.java

License: Apache License

@Override
public void put(Collection<SinkRecord> records) {

    if (records.isEmpty()) {
        logger.debug("no records to write for current poll operation");
        return;
    }

    MongoCollection<BsonDocument> mongoCollection = database.getCollection(
            sinkConfig.getString(MongoDbSinkConnectorConfig.MONGODB_COLLECTION_CONF), BsonDocument.class);

    List<? extends WriteModel<BsonDocument>> docsToWrite = sinkConfig.isUsingCdcHandler()
            ? buildWriteModelCDC(records)
            : buildWriteModel(records);

    try {
        logger.debug("#records to write: {}", docsToWrite.size());
        if (!docsToWrite.isEmpty()) {
            BulkWriteResult result = mongoCollection.bulkWrite(docsToWrite, BULK_WRITE_OPTIONS);
            logger.debug("write result: " + result.toString());
        }
    } catch (MongoException mexc) {

        if (mexc instanceof BulkWriteException) {
            BulkWriteException bwe = (BulkWriteException) mexc;
            logger.error("mongodb bulk write (partially) failed", bwe);
            logger.error(bwe.getWriteResult().toString());
            logger.error(bwe.getWriteErrors().toString());
            logger.error(bwe.getWriteConcernError().toString());
        } else {
            logger.error("error on mongodb operation", mexc);
            logger.error("writing {} record(s) failed -> remaining retries ({})", records.size(),
                    remainingRetries);
        }

        if (remainingRetries-- <= 0) {
            throw new ConnectException(
                    "couldn't successfully process records" + " despite retrying -> giving up :(", mexc);
        }

        logger.debug("deferring retry operation for {}ms", deferRetryMs);
        context.timeout(deferRetryMs);
        throw new RetriableException(mexc.getMessage(), mexc);
    }

}
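
Note that in the example above bwe.getWriteConcernError() may be null (see the Document section), so calling toString() on it directly can throw a NullPointerException. The helper below is a null-safe sketch only; the class name, the SLF4J logger, and the message wording are assumptions and are not taken from the quoted project.

import com.mongodb.BulkWriteError;
import com.mongodb.BulkWriteException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class BulkWriteErrorLogging {

    private static final Logger LOGGER = LoggerFactory.getLogger(BulkWriteErrorLogging.class);

    private BulkWriteErrorLogging() {
    }

    /** Logs the partial result, every per-write error, and the write concern error (if any). */
    static void logBulkWriteFailure(BulkWriteException bwe) {
        LOGGER.error("mongodb bulk write (partially) failed", bwe);
        LOGGER.error("write result: {}", bwe.getWriteResult());

        // getWriteErrors() is never null, but it may be empty
        for (BulkWriteError writeError : bwe.getWriteErrors()) {
            LOGGER.error("write error at index {}: code={}, message={}",
                    writeError.getIndex(), writeError.getCode(), writeError.getMessage());
        }

        // getWriteConcernError() may be null, so guard before logging it
        if (bwe.getWriteConcernError() != null) {
            LOGGER.error("write concern error: {}", bwe.getWriteConcernError());
        }
    }
}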

From source file: com.redhat.lightblue.mongo.crud.BasicDocDeleter.java

License: Open Source License

@Override
public void delete(CRUDOperationContext ctx, DBCollection collection, DBObject mongoQuery,
        CRUDDeleteResponse response) {
    LOGGER.debug("Removing docs with {}", mongoQuery);

    int numDeleted = 0;

    if (!hookOptimization || ctx.getHookManager().hasHooks(ctx, CRUDOperation.DELETE)) {
        LOGGER.debug("There are hooks, retrieve-delete");
        try (DBCursor cursor = collection.find(mongoQuery, null)) {
            // Set read preference to primary for read-for-update operations
            cursor.setReadPreference(ReadPreference.primary());

            // All docs, to be put into the context
            ArrayList<DocCtx> contextDocs = new ArrayList<>();
            // ids to delete from the db
            List<Object> idsToDelete = new ArrayList<>(batchSize);
            while (cursor.hasNext()) {

                // We will use this index to access the documents deleted in this batch
                int thisBatchIndex = contextDocs.size();
                if (idsToDelete.size() < batchSize) {
                    // build batch
                    DBObject doc = cursor.next();
                    DocTranslator.TranslatedDoc tdoc = translator.toJson(doc);
                    DocCtx docCtx = new DocCtx(tdoc.doc, tdoc.rmd);
                    docCtx.setOriginalDocument(docCtx);
                    docCtx.setCRUDOperationPerformed(CRUDOperation.DELETE);
                    contextDocs.add(docCtx);
                    idsToDelete.add(doc.get(MongoCRUDController.ID_STR));
                }

                if (idsToDelete.size() == batchSize || !cursor.hasNext()) {
                    // batch built or run out of documents                        
                    BulkWriteOperation bw = collection.initializeUnorderedBulkOperation();

                    for (Object id : idsToDelete) {
                        // doing a bulk of single operations instead of removing by initial query
                        // that way we know which documents were not removed
                        bw.find(new BasicDBObject("_id", id)).remove();
                    }

                    BulkWriteResult result = null;
                    try {
                        if (writeConcern == null) {
                            LOGGER.debug("Bulk deleting docs");
                            result = bw.execute();
                        } else {
                            LOGGER.debug("Bulk deleting docs with writeConcern={} from execution",
                                    writeConcern);
                            result = bw.execute(writeConcern);
                        }
                        LOGGER.debug("Bulk deleted docs - attempted {}, deleted {}", idsToDelete.size(),
                                result.getRemovedCount());
                    } catch (BulkWriteException bwe) {
                        LOGGER.error("Bulk write exception", bwe);
                        handleBulkWriteError(bwe.getWriteErrors(),
                                contextDocs.subList(thisBatchIndex, contextDocs.size()));
                        result = bwe.getWriteResult();
                    } catch (RuntimeException e) {
                        LOGGER.error("Exception", e);
                        throw e;
                    } finally {

                        numDeleted += result.getRemovedCount();
                        // clear list before processing next batch
                        idsToDelete.clear();
                    }
                }
            }
            ctx.setDocumentStream(new ListDocumentStream<DocCtx>(contextDocs));
        }
    } else {
        LOGGER.debug("There are no hooks, deleting in bulk");
        try {
            if (writeConcern == null) {
                numDeleted = collection.remove(mongoQuery).getN();
            } else {
                numDeleted = collection.remove(mongoQuery, writeConcern).getN();
            }
        } catch (MongoException e) {
            LOGGER.error("Deletion error", e);
            throw e;
        }
        ctx.setDocumentStream(new ListDocumentStream<DocCtx>(new ArrayList<DocCtx>()));
    }

    response.setNumDeleted(numDeleted);
}

From source file: com.redhat.lightblue.mongo.crud.BasicDocSaver.java

License: Open Source License

private void insertDocs(CRUDOperationContext ctx, DBCollection collection, List<DocInfo> list) {
    if (!list.isEmpty()) {
        LOGGER.debug("Inserting {} docs", list.size());
        if (!md.getAccess().getInsert().hasAccess(ctx.getCallerRoles())) {
            for (DocInfo doc : list) {
                doc.inputDoc.addError(
                        Error.get("insert", MongoCrudConstants.ERR_NO_ACCESS, "insert:" + md.getName()));
            }
        } else {
            List<DocInfo> insertionAttemptList = new ArrayList<>(list.size());
            for (DocInfo doc : list) {
                Set<Path> paths = roleEval.getInaccessibleFields_Insert(doc.inputDoc);
                LOGGER.debug("Inaccessible fields:{}", paths);
                if (paths == null || paths.isEmpty()) {
                    DocTranslator.populateDocHiddenFields(doc.newDoc, md);
                    DocVerUtil.overwriteDocVer(doc.newDoc, docver);
                    insertionAttemptList.add(doc);
                } else {
                    for (Path path : paths) {
                        doc.inputDoc.addError(
                                Error.get("insert", CrudConstants.ERR_NO_FIELD_INSERT_ACCESS, path.toString()));
                    }
                }
            }
            LOGGER.debug("After access checks, inserting {} docs", insertionAttemptList.size());
            if (!insertionAttemptList.isEmpty()) {
                BulkWriteOperation bw = collection.initializeUnorderedBulkOperation();
                for (DocInfo doc : insertionAttemptList) {
                    ctx.getFactory().getInterceptors().callInterceptors(InterceptPoint.PRE_CRUD_INSERT_DOC, ctx,
                            doc.inputDoc);
                    bw.insert(doc.newDoc);
                    doc.inputDoc.setCRUDOperationPerformed(CRUDOperation.INSERT);
                }
                try {
                    if (writeConcern == null) {
                        LOGGER.debug("Bulk inserting docs");
                        bw.execute();
                    } else {
                        LOGGER.debug("Bulk inserting docs with writeConcern={} from execution", writeConcern);
                        bw.execute(writeConcern);
                    }
                } catch (BulkWriteException bwe) {
                    LOGGER.error("Bulk write exception", bwe);
                    handleBulkWriteError(bwe.getWriteErrors(), "insert", insertionAttemptList);
                } catch (RuntimeException e) {
                    LOGGER.error("Exception", e);
                    throw e;
                } finally {
                }
            }
        }
    }
}

From source file: com.redhat.lightblue.mongo.crud.BatchUpdate.java

License: Open Source License

/**
 * Runs a batch update using bwo
 *
 * @param bwo The bulk write operation
 * @param writeConcern The write concern to use, or null to execute with the collection default
 * @param batchSize The number of update operations in the bulk write
 * @param results The results are populated during this call with an error for each failed doc
 * @param logger The logger
 *
 * @return If returns true, all docs are updated. Otherwise, there
 * are some failed docs, and concurrent update error detection
 * should be called
 */
public static boolean batchUpdate(BulkWriteOperation bwo, WriteConcern writeConcern, int batchSize,
        Map<Integer, Error> results, Logger logger) {
    boolean ret = true;
    BulkWriteResult writeResult;
    logger.debug("attemptToUpdate={}", batchSize);
    try {
        if (writeConcern == null) {
            writeResult = bwo.execute();
        } else {
            writeResult = bwo.execute(writeConcern);
        }
        logger.debug("writeResult={}", writeResult);
        if (batchSize == writeResult.getMatchedCount()) {
            logger.debug("Successful update");
        } else {
            logger.warn("notUpdated={}", batchSize - writeResult.getMatchedCount());
            ret = false;
        }
    } catch (BulkWriteException e) {
        List<BulkWriteError> writeErrors = e.getWriteErrors();
        if (writeErrors != null) {
            for (BulkWriteError we : writeErrors) {
                if (MongoCrudConstants.isDuplicate(we.getCode())) {
                    results.put(we.getIndex(),
                            Error.get("update", MongoCrudConstants.ERR_DUPLICATE, we.getMessage()));
                } else {
                    results.put(we.getIndex(),
                            Error.get("update", MongoCrudConstants.ERR_SAVE_ERROR, we.getMessage()));
                }
            }
        }
        ret = false;
    }
    return ret;
}

From source file: org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore.java

License: Apache License

private <T extends Document> BulkUpdateResult sendBulkUpdate(Collection<T> collection,
        java.util.Collection<UpdateOp> updateOps, Map<String, T> oldDocs) {
    DBCollection dbCollection = getDBCollection(collection);
    BulkWriteOperation bulk = dbCollection.initializeUnorderedBulkOperation();
    String[] bulkIds = new String[updateOps.size()];
    int i = 0;
    for (UpdateOp updateOp : updateOps) {
        String id = updateOp.getId();
        QueryBuilder query = createQueryForUpdate(id, updateOp.getConditions());
        T oldDoc = oldDocs.get(id);
        DBObject update;
        if (oldDoc == null) {
            query.and(Document.MOD_COUNT).exists(false);
            update = createUpdate(updateOp, true);
        } else {
            query.and(Document.MOD_COUNT).is(oldDoc.getModCount());
            update = createUpdate(updateOp, false);
        }
        bulk.find(query.get()).upsert().updateOne(update);
        bulkIds[i++] = id;
    }

    BulkWriteResult bulkResult;
    Set<String> failedUpdates = new HashSet<String>();
    Set<String> upserts = new HashSet<String>();
    try {
        bulkResult = bulk.execute();
    } catch (BulkWriteException e) {
        bulkResult = e.getWriteResult();
        for (BulkWriteError err : e.getWriteErrors()) {
            failedUpdates.add(bulkIds[err.getIndex()]);
        }
    }
    for (BulkWriteUpsert upsert : bulkResult.getUpserts()) {
        upserts.add(bulkIds[upsert.getIndex()]);
    }
    return new BulkUpdateResult(failedUpdates, upserts);
}

From source file: org.lucee.mongodb.DBCollectionImpl.java

License: Open Source License

@Override
public Object call(PageContext pc, Key methodName, Object[] args) throws PageException {

    // aggregate
    if (methodName.equals("aggregate")) {
        boolean hasOptions = false;
        AggregationOptions options = null;
        int len = checkArgLength("aggregate", args, 1, -1); // no length limitation
        List<DBObject> pipeline = new ArrayList<DBObject>();
        // Pipeline array as single argument
        if (len == 1 && decision.isArray(args[0])) {
            Array arr = caster.toArray(args[0]);
            if (arr.size() == 0)
                throw exp.createApplicationException(
                        "the array passed to the function aggregate needs at least 1 element");

            Iterator<Object> it = arr.valueIterator();
            while (it.hasNext()) {
                pipeline.add(toDBObject(it.next()));
            }
        } else {
            // First argument is pipeline of operations, second argument is struct of options --> returns cursor!
            if (len == 2 && decision.isArray(args[0]) && decision.isStruct(args[1])) {
                Array arr = caster.toArray(args[0]);
                Iterator<Object> it = arr.valueIterator();
                while (it.hasNext()) {
                    pipeline.add(toDBObject(it.next()));
                }

                hasOptions = true;
                // options builder
                AggregationOptions.Builder optbuilder = AggregationOptions.builder()
                        .outputMode(AggregationOptions.OutputMode.CURSOR);

                DBObject dboOpts = toDBObject(args[1]);
                if (dboOpts.containsField("allowDiskUse")) {
                    if (!decision.isBoolean(dboOpts.get("allowDiskUse")))
                        throw exp.createApplicationException("allowDiskUse in options must be boolean value");

                    optbuilder = optbuilder.allowDiskUse(caster.toBooleanValue(dboOpts.get("allowDiskUse")));
                }
                if (dboOpts.containsField("cursor")) {
                    if (!decision.isStruct(dboOpts.get("cursor")))
                        throw exp.createApplicationException(
                                "cursor in options must be struct with optional key batchSize");

                    DBObject cursoropts = toDBObject(dboOpts.get("cursor"));
                    if (cursoropts.containsField("batchSize")) {
                        if (!decision.isNumeric(cursoropts.get("batchSize")))
                            throw exp.createApplicationException("cursor.batchSize in options must be integer");

                        optbuilder = optbuilder.batchSize(caster.toIntValue(cursoropts.get("batchSize")));
                    }
                }

                options = optbuilder.build();
            }
            // First argument is first operation, second argument is array of additional operations
            else if (len == 2 && decision.isArray(args[1])) {
                Array arr = caster.toArray(args[1]);
                pipeline.add(toDBObject(args[0]));
                Iterator<Object> it = arr.valueIterator();
                while (it.hasNext()) {
                    pipeline.add(toDBObject(it.next()));
                }
            }
            // N arguments of pipeline operations
            else {
                for (int i = 0; i < len; i++) {
                    pipeline.add(toDBObject(args[i]));
                }
            }
        }

        if (hasOptions) {
            // returns Cursor - requires >= MongoDB 2.6
            return toCFML(coll.aggregate(pipeline, options));
        } else {
            // returns AggregationOutput
            return toCFML(coll.aggregate(pipeline));
        }
    }
    // count
    if (methodName.equals("count")) {
        int len = checkArgLength("count", args, 0, 1);
        if (len == 0) {
            return toCFML(coll.count());
        } else if (len == 1) {
            return toCFML(coll.count(toDBObject(args[0])));
        }
    }
    // dataSize
    if (methodName.equals("dataSize")) {
        checkArgLength("dataSize", args, 0, 0);
        return toCFML(coll.getStats().get("size"));
    }

    // distinct
    if (methodName.equals("distinct")) {
        int len = checkArgLength("distinct", args, 1, 2);
        if (len == 1) {
            return toCFML(coll.distinct(caster.toString(args[0])));
        } else if (len == 2) {
            return toCFML(coll.distinct(caster.toString(args[0]), toDBObject(args[1])));
        }
    }
    // drop
    if (methodName.equals("drop")) {
        checkArgLength("drop", args, 0, 0);
        coll.drop();
        return null;
    }

    // dropIndex
    if (methodName.equals("dropIndex")) {
        checkArgLength("dropIndex", args, 1, 1);
        DBObject dbo = toDBObject(args[0], null);
        if (dbo != null)
            coll.dropIndex(dbo);
        else
            coll.dropIndex(caster.toString(args[0]));

        return null;
    }
    // dropIndexes
    if (methodName.equals("dropIndexes")) {
        int len = checkArgLength("dropIndexes", args, 0, 1);
        if (len == 0) {
            coll.dropIndexes();
            return null;
        } else if (len == 1) {
            coll.dropIndexes(caster.toString(args[0]));
            return null;
        }
    }

    // createIndex
    if (methodName.equals("createIndex") || methodName.equals("ensureIndex")) {
        int len = checkArgLength("createIndex", args, 1, 3);
        if (len == 1) {
            DBObject dbo = toDBObject(args[0], null);
            if (dbo != null)
                coll.createIndex(dbo);
            else
                coll.createIndex(caster.toString(args[0]));
            return null;
        }
        if (len == 2) {
            DBObject p1 = toDBObject(args[0]);
            DBObject p2 = toDBObject(args[1], null);
            if (p2 != null)
                coll.createIndex(p1, p2);
            else
                coll.createIndex(p1, caster.toString(args[1]));
            return null;
        } else if (len == 3) {
            coll.createIndex(toDBObject(args[0]), caster.toString(args[1]), caster.toBooleanValue(args[2]));
            return null;
        }
    }

    // getStats
    if (methodName.equals("getStats") || methodName.equals("stats")) {
        checkArgLength("getStats", args, 0, 0);
        return toCFML(coll.getStats());
    }

    // getIndexes
    if (methodName.equals("getIndexes") || methodName.equals("getIndexInfo")) {
        checkArgLength(methodName.getString(), args, 0, 0);
        return toCFML(coll.getIndexInfo());
    }

    // getWriteConcern
    if (methodName.equals("getWriteConcern")) {
        checkArgLength("getWriteConcern", args, 0, 0);
        return toCFML(coll.getWriteConcern());
    }

    // find
    if (methodName.equals("find")) {
        int len = checkArgLength("find", args, 0, 3);
        DBCursor cursor = null;
        if (len == 0) {
            cursor = coll.find();
        } else if (len == 1) {
            cursor = coll.find(toDBObject(args[0]));
        } else if (len == 2) {
            cursor = coll.find(toDBObject(args[0]), toDBObject(args[1]));
        } else if (len == 3) {
            cursor = coll.find(toDBObject(args[0]), toDBObject(args[1])).skip(caster.toIntValue(args[2]));
        }

        return toCFML(cursor);
    }
    // findOne
    else if (methodName.equals("findOne")) {
        int len = checkArgLength("findOne", args, 0, 3);
        DBObject obj = null;
        if (len == 0) {
            obj = coll.findOne();
        } else if (len == 1) {
            DBObject arg1 = toDBObject(args[0], null);
            if (arg1 != null)
                obj = coll.findOne(arg1);
            else
                obj = coll.findOne(args[0]);

        } else if (len == 2) {
            DBObject arg1 = toDBObject(args[0], null);
            if (arg1 != null)
                obj = coll.findOne(arg1, toDBObject(args[1]));
            else
                obj = coll.findOne(args[0], toDBObject(args[1]));
        } else if (len == 3) {
            obj = coll.findOne(toDBObject(args[0]), toDBObject(args[1]), toDBObject(args[2]));
        }
        return toCFML(obj);
    }
    // findAndRemove
    if (methodName.equals("findAndRemove")) {
        checkArgLength("findAndRemove", args, 1, 1);
        DBObject obj = coll.findAndRemove(toDBObject(args[0]));
        return toCFML(obj);
    }
    // findAndModify
    if (methodName.equals("findAndModify")) {
        int len = args == null ? 0 : args.length;
        if (len != 2 && len != 3 && len != 7) {
            throw exp.createApplicationException(
                    "the function findAndModify needs 2, 3 or 7 arguments, but you have defined only " + len);
        }
        DBObject obj = null;
        if (len == 2) {
            obj = coll.findAndModify(toDBObject(args[0]), toDBObject(args[1]));
        } else if (len == 3) {
            obj = coll.findAndModify(toDBObject(args[0]), toDBObject(args[1]), toDBObject(args[2]));
        } else if (len == 7) {
            obj = coll.findAndModify(toDBObject(args[0]), toDBObject(args[1]), toDBObject(args[2]),
                    caster.toBooleanValue(args[3]), toDBObject(args[4]), caster.toBooleanValue(args[5]),
                    caster.toBooleanValue(args[6]));
        }

        return toCFML(obj);
    }

    //group
    /*
       TODO: needs GroupCommand
    if(methodName.equals("group")) {
       int len=checkArgLength("group",args,1,1);
       if(len==1){
    return toCFML(coll.group(
       toDBObject(args[0])
    ));
       }
    }*/

    // insert
    if (methodName.equals("insert")) {
        checkArgLength("insert", args, 1, 1);
        return toCFML(coll.insert(toDBObjectArray(args[0])));
    }

    // insertMany(required array documents, struct options) valid options keys are string "writeconcern", boolean "ordered"
    if (methodName.equals("insertMany")) {
        int len = checkArgLength("insertMany", args, 1, 2);
        BulkWriteOperation bulk = coll.initializeOrderedBulkOperation();
        WriteConcern wc = coll.getWriteConcern();

        if (len == 2) {
            DBObject dboOpts = toDBObject(args[1]);
            if (dboOpts.containsField("ordered")) {
                if (!decision.isBoolean(dboOpts.get("ordered")))
                    throw exp.createApplicationException("ordered in options must be boolean value");

                if (!caster.toBooleanValue(dboOpts.get("ordered"))) {
                    bulk = coll.initializeUnorderedBulkOperation();
                }
            }

            if (dboOpts.containsField("writeconcern")) {
                WriteConcern newWc = WriteConcern.valueOf(caster.toString(dboOpts.get("writeconcern")));
                if (newWc != null) {
                    wc = newWc;
                }
            }
        }
        Map<String, Object> result = new LinkedHashMap<String, Object>();
        BulkWriteResult bulkResult;
        List<Map> writeErrors = new ArrayList<Map>();

        Array arr = caster.toArray(args[0]);
        if (arr.size() == 0) {
            result.put("nInserted", 0);
            result.put("writeErrors", writeErrors);
            result.put("acknowledged", true);
            return toCFML(result);
        }

        Iterator<Object> it = arr.valueIterator();
        while (it.hasNext()) {
            bulk.insert(toDBObject(it.next()));
        }

        try {
            bulkResult = bulk.execute(wc);
        } catch (BulkWriteException e) {
            Map<String, Object> bulkErrorItem;
            BulkWriteError bulkError;

            bulkResult = e.getWriteResult();
            List<BulkWriteError> errors = e.getWriteErrors();

            Iterator<BulkWriteError> jj = errors.iterator();
            while (jj.hasNext()) {
                bulkErrorItem = new LinkedHashMap<String, Object>();
                bulkError = jj.next();
                bulkErrorItem.put("index", (bulkError.getIndex() + 1)); // +1 so we get index of item in CFML array
                bulkErrorItem.put("code", bulkError.getCode());
                bulkErrorItem.put("errmsg", bulkError.getMessage());
                bulkErrorItem.put("op", bulkError.getDetails());
                writeErrors.add(bulkErrorItem);
            }
        }

        result.put("acknowledged", bulkResult.isAcknowledged());
        if (bulkResult.isAcknowledged()) {
            result.put("nInserted", bulkResult.getInsertedCount());
            result.put("writeErrors", writeErrors);
        }

        return toCFML(result);
    }

    // bulkWrite(required array operations, struct options) valid options keys are string "writeconcern", boolean "ordered", boolean "bypassDocumentValidation"
    // an operation is a struct with the following keys: { "operation":[insert|update|updateOne|remove|removeOne], "document":[(required if operation is insert) - a doc to insert], "query":[(optional) - the query to find for remove/update operations], "update":[(required for update/updateOne) - the update document] }
    // i.e. dbCollection.bulkWrite([
    //       {"operation":"insert", "document":{"test":"insert"}}
    //       ,{"operation":"updateOne", "query":{"_id":"foo"}, "update":{"$set":{"updated":true}}}         
    //       ,{"operation":"removeOne", "query":{"_id":"goaway"}}         
    // ],{"ordered":false})
    if (methodName.equals("bulkWrite")) {
        int len = checkArgLength("bulkWrite", args, 1, 2);
        BulkWriteOperation bulk = coll.initializeOrderedBulkOperation();
        WriteConcern wc = coll.getWriteConcern();

        if (len == 2) {
            DBObject dboOpts = toDBObject(args[1]);
            if (dboOpts.containsField("ordered")) {
                if (!decision.isBoolean(dboOpts.get("ordered")))
                    throw exp.createApplicationException("ordered in options must be boolean value");

                if (!caster.toBooleanValue(dboOpts.get("ordered"))) {
                    bulk = coll.initializeUnorderedBulkOperation();
                }
            }

            if (dboOpts.containsField("bypassDocumentValidation")) {
                if (!decision.isBoolean(dboOpts.get("bypassDocumentValidation")))
                    throw exp.createApplicationException(
                            "bypassDocumentValidation in options must be boolean value");

                bulk.setBypassDocumentValidation(
                        caster.toBooleanValue(dboOpts.get("bypassDocumentValidation")));
            }

            if (dboOpts.containsField("writeconcern")) {
                WriteConcern newWc = WriteConcern.valueOf(caster.toString(dboOpts.get("writeconcern")));
                if (newWc != null) {
                    wc = newWc;
                }
            }
        }
        Map<String, Object> result = new LinkedHashMap<String, Object>();
        BulkWriteResult bulkResult;
        List<Map> writeErrors = new ArrayList<Map>();

        Array arr = caster.toArray(args[0]);
        if (arr.size() == 0) {
            result.put("nInserted", 0);
            result.put("nMatched", 0);
            result.put("nModified", 0);
            result.put("nRemoved", 0);
            result.put("writeErrors", writeErrors);
            result.put("acknowledged", true);
            return toCFML(result);
        }

        Iterator<Object> it = arr.valueIterator();
        while (it.hasNext()) {

            DBObject operation = toDBObject(it.next());

            if ("update".equals(operation.get("operation"))) {
                // add an update operation
                bulk.find(toDBObject(operation.get("query"))).update(toDBObject(operation.get("update")));
            } else if ("updateOne".equals(operation.get("operation"))) {
                // add an updateOne operation
                bulk.find(toDBObject(operation.get("query"))).updateOne(toDBObject(operation.get("update")));
            } else if ("remove".equals(operation.get("operation"))) {
                // add a remove operation
                bulk.find(toDBObject(operation.get("query"))).remove();
            } else if ("removeOne".equals(operation.get("operation"))) {
                // add a removeOne operation
                bulk.find(toDBObject(operation.get("query"))).removeOne();
            } else if ("insert".equals(operation.get("operation"))) {
                bulk.insert(toDBObject(operation.get("document")));
            }

        }

        try {
            bulkResult = bulk.execute(wc);
        } catch (BulkWriteException e) {
            Map<String, Object> bulkErrorItem;
            BulkWriteError bulkError;

            bulkResult = e.getWriteResult();
            List<BulkWriteError> errors = e.getWriteErrors();

            Iterator<BulkWriteError> jj = errors.iterator();
            while (jj.hasNext()) {
                bulkErrorItem = new LinkedHashMap<String, Object>();
                bulkError = jj.next();
                bulkErrorItem.put("index", (bulkError.getIndex() + 1)); // +1 so we get index of item in CFML array
                bulkErrorItem.put("code", bulkError.getCode());
                bulkErrorItem.put("errmsg", bulkError.getMessage());
                bulkErrorItem.put("op", bulkError.getDetails());
                writeErrors.add(bulkErrorItem);
            }
        }

        result.put("acknowledged", bulkResult.isAcknowledged());
        if (bulkResult.isAcknowledged()) {
            result.put("nInserted", bulkResult.getInsertedCount());
            result.put("nMatched", bulkResult.getMatchedCount());
            result.put("nModified", bulkResult.getModifiedCount());
            result.put("nRemoved", bulkResult.getRemovedCount());
            result.put("writeErrors", writeErrors);
        }

        return toCFML(result);
    }

    //mapReduce
    if (methodName.equals("mapReduce")) {
        int len = checkArgLength("mapReduce", args, 4, 4);
        if (len == 4) {
            return toCFML(coll.mapReduce(caster.toString(args[0]), caster.toString(args[1]),
                    caster.toString(args[2]), toDBObject(args[3])));
        }
    }

    // remove
    if (methodName.equals("remove")) {
        checkArgLength("remove", args, 1, 1);
        return toCFML(coll.remove(toDBObject(args[0])));

    }

    // rename
    if (methodName.equals("rename") || methodName.equals("renameCollection")) {
        int len = checkArgLength(methodName.getString(), args, 1, 2);
        if (len == 1) {
            return toCFML(coll.rename(caster.toString(args[0])));
        } else if (len == 2) {
            return toCFML(coll.rename(caster.toString(args[0]), caster.toBooleanValue(args[1])));
        }
    }

    // save
    if (methodName.equals("save")) {
        checkArgLength("save", args, 1, 1);
        return toCFML(coll.save(toDBObject(args[0])));
    }

    // setWriteConcern
    if (methodName.equals("setWriteConcern")) {
        checkArgLength("setWriteConcern", args, 1, 1);
        WriteConcern wc = WriteConcern.valueOf(caster.toString(args[0]));
        if (wc != null) {
            coll.setWriteConcern(wc);
        }
        return null;
    }

    // storageSize
    if (methodName.equals("storageSize")) {
        checkArgLength("storageSize", args, 0, 0);
        return toCFML(coll.getStats().get("storageSize"));
    }

    // totalIndexSize
    if (methodName.equals("totalIndexSize")) {
        checkArgLength("totalIndexSize", args, 0, 0);
        return toCFML(coll.getStats().get("totalIndexSize"));
    }

    // update
    if (methodName.equals("update")) {
        int len = checkArgLength("update", args, 2, 4);
        if (len == 2) {
            return toCFML(coll.update(toDBObject(args[0]), toDBObject(args[1])));
        } else if (len == 3) {
            return toCFML(coll.update(toDBObject(args[0]), toDBObject(args[1]), caster.toBooleanValue(args[2]),
                    false));
        } else if (len == 4) {
            return toCFML(coll.update(toDBObject(args[0]), toDBObject(args[1]), caster.toBooleanValue(args[2]),
                    caster.toBooleanValue(args[3])));
        }
    }

    String functionNames = "aggregate,count,dataSize,distinct,drop,dropIndex,dropIndexes,createIndex,stats,getIndexes,getWriteConcern,find,findOne,findAndRemove,findAndModify,"
            + "group,insert,insertMany,bulkWrite,mapReduce,remove,rename,save,setWriteConcern,storageSize,totalIndexSize,update";

    throw exp.createApplicationException(
            "function " + methodName + " does not exist existing functions are [" + functionNames + "]");
}

From source file: org.opencb.opencga.storage.mongodb.variant.VariantMongoDBAdaptor.java

License: Apache License

/**
 * Two-step insertion:
 *      First, check whether the variant and study already exist by attempting an update.
 *      For those that do not exist, push a study with the file and genotype information.
 *
 *      The documents that throw a "dup key" exception are those variants that already exist and have the study.
 *      Then, only for those variants, make a second update.
 *
 * *An interesting idea would be to invert these actions depending on the number of already inserted variants.
 *
 * @param loadedSampleIds Other loaded sampleIds EXCEPT those that are going to be loaded
 * @param data  Variants to insert
 */
QueryResult insert(List<Variant> data, int fileId, DBObjectToVariantConverter variantConverter,
        DBObjectToVariantSourceEntryConverter variantSourceEntryConverter,
        StudyConfiguration studyConfiguration, List<Integer> loadedSampleIds) {
    if (data.isEmpty()) {
        return new QueryResult("insertVariants");
    }
    List<DBObject> queries = new ArrayList<>(data.size());
    List<DBObject> updates = new ArrayList<>(data.size());
    Set<String> nonInsertedVariants;
    String fileIdStr = Integer.toString(fileId);

    {
        nonInsertedVariants = new HashSet<>();
        Map missingSamples = Collections.emptyMap();
        String defaultGenotype = studyConfiguration.getAttributes()
                .getString(MongoDBVariantStorageManager.DEFAULT_GENOTYPE, "");
        if (defaultGenotype.equals(DBObjectToSamplesConverter.UNKNOWN_GENOTYPE)) {
            logger.debug("Do not need fill gaps. DefaultGenotype is UNKNOWN_GENOTYPE({}).");
        } else if (!loadedSampleIds.isEmpty()) {
            missingSamples = new BasicDBObject(DBObjectToSamplesConverter.UNKNOWN_GENOTYPE, loadedSampleIds); // ?/?
        }
        for (Variant variant : data) {
            String id = variantConverter.buildStorageId(variant);
            for (VariantSourceEntry variantSourceEntry : variant.getSourceEntries().values()) {
                if (!variantSourceEntry.getFileId().equals(fileIdStr)) {
                    continue;
                }
                int studyId = studyConfiguration.getStudyId();
                DBObject study = variantSourceEntryConverter.convertToStorageType(variantSourceEntry);
                DBObject genotypes = (DBObject) study
                        .get(DBObjectToVariantSourceEntryConverter.GENOTYPES_FIELD);
                if (genotypes != null) { //If genotypes is null, genotypes are not supposed to be loaded
                    genotypes.putAll(missingSamples); //Add missing samples
                }
                DBObject push = new BasicDBObject(DBObjectToVariantConverter.STUDIES_FIELD, study);
                BasicDBObject update = new BasicDBObject().append("$push", push).append("$setOnInsert",
                        variantConverter.convertToStorageType(variant));
                if (variant.getIds() != null && !variant.getIds().isEmpty()
                        && !variant.getIds().iterator().next().isEmpty()) {
                    update.put("$addToSet", new BasicDBObject(DBObjectToVariantConverter.IDS_FIELD,
                            new BasicDBObject("$each", variant.getIds())));
                }
                // { _id: <variant_id>, "studies.sid": {$ne: <studyId> } }
                //If the variant exists and contains the study, this find will fail, the upsert will be attempted, and a duplicate key exception will be thrown.
                queries.add(new BasicDBObject("_id", id).append(
                        DBObjectToVariantConverter.STUDIES_FIELD + "."
                                + DBObjectToVariantSourceEntryConverter.STUDYID_FIELD,
                        new BasicDBObject("$ne", studyId)));
                updates.add(update);
            }
        }
        QueryOptions options = new QueryOptions("upsert", true);
        options.put("multi", false);
        try {
            variantsCollection.update(queries, updates, options);
        } catch (BulkWriteException e) {
            for (BulkWriteError writeError : e.getWriteErrors()) {
                if (writeError.getCode() == 11000) { //Dup Key error code
                    nonInsertedVariants.add(writeError.getMessage().split("dup key")[1].split("\"")[1]);
                } else {
                    throw e;
                }
            }
        }
        queries.clear();
        updates.clear();
    }

    for (Variant variant : data) {
        variant.setAnnotation(null);
        String id = variantConverter.buildStorageId(variant);

        if (nonInsertedVariants != null && !nonInsertedVariants.contains(id)) {
            continue; //Already inserted variant
        }

        for (VariantSourceEntry variantSourceEntry : variant.getSourceEntries().values()) {
            if (!variantSourceEntry.getFileId().equals(fileIdStr)) {
                continue;
            }

            DBObject studyObject = variantSourceEntryConverter.convertToStorageType(variantSourceEntry);
            DBObject genotypes = (DBObject) studyObject
                    .get(DBObjectToVariantSourceEntryConverter.GENOTYPES_FIELD);
            DBObject push = new BasicDBObject();
            if (genotypes != null) { //If genotypes is null, genotypes are not supposed to be loaded
                for (String genotype : genotypes.keySet()) {
                    push.put(
                            DBObjectToVariantConverter.STUDIES_FIELD + ".$."
                                    + DBObjectToVariantSourceEntryConverter.GENOTYPES_FIELD + "." + genotype,
                            new BasicDBObject("$each", genotypes.get(genotype)));
                }
            } else {
                push.put(
                        DBObjectToVariantConverter.STUDIES_FIELD + ".$."
                                + DBObjectToVariantSourceEntryConverter.GENOTYPES_FIELD,
                        Collections.emptyMap());
            }
            push.put(
                    DBObjectToVariantConverter.STUDIES_FIELD + ".$."
                            + DBObjectToVariantSourceEntryConverter.FILES_FIELD,
                    ((List) studyObject.get(DBObjectToVariantSourceEntryConverter.FILES_FIELD)).get(0));
            BasicDBObject update = new BasicDBObject(new BasicDBObject("$push", push));

            queries.add(new BasicDBObject("_id", id).append(
                    DBObjectToVariantConverter.STUDIES_FIELD + "."
                            + DBObjectToVariantSourceEntryConverter.STUDYID_FIELD,
                    studyConfiguration.getStudyId()));
            updates.add(update);

        }

    }
    if (queries.isEmpty()) {
        return new QueryResult();
    } else {
        QueryOptions options = new QueryOptions("upsert", false);
        options.put("multi", false);
        return variantsCollection.update(queries, updates, options);
    }
}

From source file: org.springframework.data.mongodb.BulkOperationException.java

License: Apache License

/**
 * Creates a new {@link BulkOperationException} with the given message and source {@link BulkWriteException}.
 *
 * @param message must not be {@literal null}.
 * @param source must not be {@literal null}.
 */
public BulkOperationException(String message, BulkWriteException source) {

    super(message, source);

    this.errors = source.getWriteErrors();
    this.result = source.getWriteResult();
}