Example usage for com.mongodb BulkWriteResult getInsertedCount

Introduction

This page lists usage examples for com.mongodb BulkWriteResult.getInsertedCount().

Prototype

public abstract int getInsertedCount();

Document

Returns the number of documents inserted by the write operation.
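
Example

A minimal sketch of the call in isolation (assuming com.mongodb.BasicDBObject, BulkWriteOperation, and BulkWriteResult are imported, and that collection is a connected legacy-driver DBCollection):

BulkWriteOperation bulk = collection.initializeOrderedBulkOperation();
bulk.insert(new BasicDBObject("name", "alice"));
bulk.insert(new BasicDBObject("name", "bob"));

BulkWriteResult result = bulk.execute();
// the insert count is only defined for acknowledged writes, so guard on isAcknowledged()
if (result.isAcknowledged()) {
    System.out.println("inserted: " + result.getInsertedCount()); // prints "inserted: 2"
}

The examples below follow the same pattern at scale, typically comparing getInsertedCount() against the number of queued inserts to detect partial failures.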

Usage

From source file: com.xoriant.akka.mongodb.bulkimport.actor.MongoInsertionActor.java

License: Apache License

@Override
public void onReceive(Object message) throws Exception {
    if (message instanceof CSVRecordBatchMsg) {
        CSVRecordBatchMsg csvRecordBatch = (CSVRecordBatchMsg) message;
        System.out.println("InsertionActor : Batch no " + csvRecordBatch.getBatchNo() + " received ack");
        DB db = mongoClient.getDB("akka-bulkimport");
        DBCollection personColl = db.getCollection("persons");
        BulkWriteOperation builder = personColl.initializeUnorderedBulkOperation();
        List<BasicDBObject> persons = csvRecordBatch.getRecords();
        int queued = 0;
        for (BasicDBObject personDBObject : persons) {
            if (validate(personDBObject)) {
                builder.insert(personDBObject);
                queued++;
            }
        }
        // execute() fails on an empty bulk, so only run it when at least one insert was queued
        int inserted = 0;
        if (queued > 0) {
            BulkWriteResult result = builder.execute();
            inserted = result.getInsertedCount();
        }
        BatchCompleteMsg batchComplete = new BatchCompleteMsg(csvRecordBatch.getBatchNo(), inserted);
        getSender().tell(batchComplete, getSelf());
    } else if (message instanceof EndOfFileMsg) {
        System.out.println("InsertionActor: EOF received");
    } else {
        unhandled(message);
    }
}

From source file: fr.gouv.vitam.mdbes.MainIngestMDBESFromFile.java

License: Open Source License

private static final void runOnce(final MongoDbAccess dbvitam)
        throws InterruptedException, InstantiationException, IllegalAccessException, IOException {
    System.out.println("Load starting... ");
    int nbThread = ingest.length;

    final long date11 = System.currentTimeMillis();
    if (ingest.length == 1) {
        final BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(ingest[0])));
        String strLine;
        int nb = 0;
        final HashMap<String, String> esIndex = new HashMap<>();
        BulkWriteOperation bulk = dbvitam.daips.collection.initializeUnorderedBulkOperation();
        while ((strLine = br.readLine()) != null) {
            final DBObject bson = (DBObject) JSON.parse(strLine);
            bulk.insert(bson);
            ElasticSearchAccess.addEsIndex(dbvitam, model, esIndex, bson);
            nb++;
            if (nb % GlobalDatas.LIMIT_MDB_NEW_INDEX == 0) {
                BulkWriteResult result = bulk.execute();
                int check = result.getInsertedCount();
                if (check != nb) {
                    System.out.print("x");
                } else {
                    System.out.print(".");
                }
                bulk = dbvitam.daips.collection.initializeUnorderedBulkOperation();
                MainIngestFile.cptMaip.addAndGet(check);
                nb = 0;
            }
        }
        if (!esIndex.isEmpty()) {
            System.out.println("Last bulk ES");
            dbvitam.addEsEntryIndex(true, esIndex, model);
            esIndex.clear();
        }
        if (nb != 0) {
            bulk.execute();
            MainIngestFile.cptMaip.addAndGet(nb);
            nb = 0;
        }
        br.close();
    } else {
        // threads
        ExecutorService executorService = Executors.newFixedThreadPool(ingest.length + 1);
        for (int i = 0; i < ingest.length; i++) {
            MainIngestMDBESFromFile ingestrun = new MainIngestMDBESFromFile();
            ingestrun.file = ingest[i];
            executorService.execute(ingestrun);
        }
        // ES
        MainIngestMDBESFromFile ingestrun = new MainIngestMDBESFromFile();
        ingestrun.file = null;
        ingestrun.files = ingest;
        ingestrun.original = dbvitam;
        executorService.execute(ingestrun);

        executorService.shutdown();
        while (!executorService.awaitTermination(10000, TimeUnit.MILLISECONDS)) {
            // wait for all ingest tasks to finish
        }
        System.out.println("Load ended");
        final long nbBigM = dbvitam.getDaipSize();
        final long nbBigD = dbvitam.getPaipSize();
        System.out.println("\n Big Test (" + nbThread + " nb MAIP: " + MainIngestFile.cptMaip.get()
                + ") with MAIP: " + nbBigM + " DATA: " + nbBigD + " => Load:"
                + (loadt.get()) / ((float) MainIngestFile.cptMaip.get() * nbThread));

        System.out.println("\nThread;nbLoad;nbTotal;Load");
        System.out.println(nbThread + ";" + MainIngestFile.cptMaip.get() + ";" + nbBigM + ";"
                + (loadt.get()) / ((float) MainIngestFile.cptMaip.get() * nbThread));
    }
    final long date12 = System.currentTimeMillis();
    MainIngestMDBESFromFile.loadt.set(date12 - date11);

    System.out.println("Load ended");
    /*
     * System.out.println("All elements\n================================================================");
     * DbVitam.printStructure(dbvitam);
     */
    final long nbBigM = dbvitam.getDaipSize();
    final long nbBigD = dbvitam.getPaipSize();
    System.out.println("\n Big Test (" + nbThread + " Threads chacune " + MainIngestFile.nb + " nb MAIP: "
            + MainIngestFile.cptMaip.get() + ") with MAIP: " + nbBigM + " DATA: " + nbBigD + " => Load:"
            + (MainIngestMDBESFromFile.loadt.get()) / ((float) MainIngestFile.cptMaip.get()));

    System.out.println("\nThread;nbLoad;nbTotal;Load");
    System.out.println(nbThread + ";" + MainIngestFile.cptMaip.get() + ";" + nbBigM + ";"
            + (MainIngestMDBESFromFile.loadt.get()) / ((float) MainIngestFile.cptMaip.get()));
}

From source file: fr.gouv.vitam.mdbes.MainIngestMDBFromFile.java

License: Open Source License

private static final void runOnce(final MongoDbAccess dbvitam)
        throws InterruptedException, InstantiationException, IllegalAccessException, IOException {
    System.out.println("Load starting... ");
    int nbThread = ingest.length;

    final long date11 = System.currentTimeMillis();
    if (ingest.length == 1) {
        final BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(ingest[0])));
        String strLine;
        int nb = 0;
        BulkWriteOperation bulk = dbvitam.daips.collection.initializeUnorderedBulkOperation();
        while ((strLine = br.readLine()) != null) {
            final DBObject bson = (DBObject) JSON.parse(strLine);
            bulk.insert(bson);
            nb++;
            if (nb % GlobalDatas.LIMIT_MDB_NEW_INDEX == 0) {
                BulkWriteResult result = bulk.execute();
                int check = result.getInsertedCount();
                if (check != nb) {
                    System.out.print("x");
                } else {
                    System.out.print(".");
                }
                bulk = dbvitam.daips.collection.initializeUnorderedBulkOperation();
                MainIngestFile.cptMaip.addAndGet(check);
                nb = 0;
            }
        }
        if (nb != 0) {
            bulk.execute();
            MainIngestFile.cptMaip.addAndGet(nb);
            nb = 0;
        }
        br.close();
    } else {
        // threads
        ExecutorService executorService = Executors.newFixedThreadPool(ingest.length);
        for (int i = 0; i < ingest.length; i++) {
            MainIngestMDBFromFile ingestrun = new MainIngestMDBFromFile();
            ingestrun.file = ingest[i];
            executorService.execute(ingestrun);
            Thread.sleep(200);
        }
        Thread.sleep(1000);
        executorService.shutdown();
        while (!executorService.awaitTermination(10000, TimeUnit.MILLISECONDS)) {
            // wait for all ingest tasks to finish
        }
        System.out.println("Load ended");
        final long nbBigM = dbvitam.getDaipSize();
        final long nbBigD = dbvitam.getPaipSize();
        System.out.println("\n Big Test (" + nbThread + " nb MAIP: " + MainIngestFile.cptMaip.get()
                + ") with MAIP: " + nbBigM + " DATA: " + nbBigD + " => Load:"
                + (loadt.get()) / ((float) MainIngestFile.cptMaip.get() * nbThread));

        System.out.println("\nThread;nbLoad;nbTotal;Load");
        System.out.println(nbThread + ";" + MainIngestFile.cptMaip.get() + ";" + nbBigM + ";"
                + (loadt.get()) / ((float) MainIngestFile.cptMaip.get() * nbThread));
    }
    final long date12 = System.currentTimeMillis();
    MainIngestMDBFromFile.loadt.set(date12 - date11);

    System.out.println("Load ended");
    /*
     * System.out.println("All elements\n================================================================");
     * DbVitam.printStructure(dbvitam);
     */
    final long nbBigM = dbvitam.getDaipSize();
    final long nbBigD = dbvitam.getPaipSize();
    System.out.println("\n Big Test (" + nbThread + " Threads chacune " + MainIngestFile.nb + " nb MAIP: "
            + MainIngestFile.cptMaip.get() + ") with MAIP: " + nbBigM + " DATA: " + nbBigD + " => Load:"
            + (MainIngestMDBFromFile.loadt.get()) / ((float) MainIngestFile.cptMaip.get()));

    System.out.println("\nThread;nbLoad;nbTotal;Load");
    System.out.println(nbThread + ";" + MainIngestFile.cptMaip.get() + ";" + nbBigM + ";"
            + (MainIngestMDBFromFile.loadt.get()) / ((float) MainIngestFile.cptMaip.get()));
}

From source file: org.lucee.mongodb.DBCollectionImpl.java

License: Open Source License

@Override
public Object call(PageContext pc, Key methodName, Object[] args) throws PageException {

    // aggregate
    if (methodName.equals("aggregate")) {
        boolean hasOptions = false;
        AggregationOptions options = null;
        int len = checkArgLength("aggregate", args, 1, -1); // no length limitation
        List<DBObject> pipeline = new ArrayList<DBObject>();
        // Pipeline array as single argument
        if (len == 1 && decision.isArray(args[0])) {
            Array arr = caster.toArray(args[0]);
            if (arr.size() == 0)
                throw exp.createApplicationException(
                        "the array passed to the function aggregate needs at least 1 element");

            Iterator<Object> it = arr.valueIterator();
            while (it.hasNext()) {
                pipeline.add(toDBObject(it.next()));
            }
        } else {
            // First argument is pipeline of operations, second argument is struct of options --> returns cursor!
            if (len == 2 && decision.isArray(args[0]) && decision.isStruct(args[1])) {
                Array arr = caster.toArray(args[0]);
                Iterator<Object> it = arr.valueIterator();
                while (it.hasNext()) {
                    pipeline.add(toDBObject(it.next()));
                }

                hasOptions = true;
                // options builder
                AggregationOptions.Builder optbuilder = AggregationOptions.builder()
                        .outputMode(AggregationOptions.OutputMode.CURSOR);

                DBObject dboOpts = toDBObject(args[1]);
                if (dboOpts.containsField("allowDiskUse")) {
                    if (!decision.isBoolean(dboOpts.get("allowDiskUse")))
                        throw exp.createApplicationException("allowDiskUse in options must be boolean value");

                    optbuilder = optbuilder.allowDiskUse(caster.toBooleanValue(dboOpts.get("allowDiskUse")));
                }
                if (dboOpts.containsField("cursor")) {
                    if (!decision.isStruct(dboOpts.get("cursor")))
                        throw exp.createApplicationException(
                                "cursor in options must be struct with optional key batchSize");

                    DBObject cursoropts = toDBObject(dboOpts.get("cursor"));
                    if (cursoropts.containsField("batchSize")) {
                        if (!decision.isNumeric(cursoropts.get("batchSize")))
                            throw exp.createApplicationException("cursor.batchSize in options must be integer");

                        optbuilder = optbuilder.batchSize(caster.toIntValue(cursoropts.get("batchSize")));
                    }
                }

                options = optbuilder.build();
            }
            // First argument is first operation, second argument is array of additional operations
            else if (len == 2 && decision.isArray(args[1])) {
                Array arr = caster.toArray(args[1]);
                pipeline.add(toDBObject(args[0]));
                Iterator<Object> it = arr.valueIterator();
                while (it.hasNext()) {
                    pipeline.add(toDBObject(it.next()));
                }
            }
            // N arguments of pipeline operations
            else {
                for (int i = 0; i < len; i++) {
                    pipeline.add(toDBObject(args[i]));
                }
            }
        }

        if (hasOptions) {
            // returns Cursor - requires >= MongoDB 2.6
            return toCFML(coll.aggregate(pipeline, options));
        } else {
            // returns AggregationOutput
            return toCFML(coll.aggregate(pipeline));
        }
    }
    // count
    if (methodName.equals("count")) {
        int len = checkArgLength("count", args, 0, 1);
        if (len == 0) {
            return toCFML(coll.count());
        } else if (len == 1) {
            return toCFML(coll.count(toDBObject(args[0])));
        }
    }
    // dataSize
    if (methodName.equals("dataSize")) {
        checkArgLength("dataSize", args, 0, 0);
        return toCFML(coll.getStats().get("size"));
    }

    // distinct
    if (methodName.equals("distinct")) {
        int len = checkArgLength("distinct", args, 1, 2);
        if (len == 1) {
            return toCFML(coll.distinct(caster.toString(args[0])));
        } else if (len == 2) {
            return toCFML(coll.distinct(caster.toString(args[0]), toDBObject(args[1])));
        }
    }
    // drop
    if (methodName.equals("drop")) {
        checkArgLength("drop", args, 0, 0);
        coll.drop();
        return null;
    }

    // dropIndex
    if (methodName.equals("dropIndex")) {
        checkArgLength("dropIndex", args, 1, 1);
        DBObject dbo = toDBObject(args[0], null);
        if (dbo != null)
            coll.dropIndex(dbo);
        else
            coll.dropIndex(caster.toString(args[0]));

        return null;
    }
    // dropIndexes
    if (methodName.equals("dropIndexes")) {
        int len = checkArgLength("dropIndexes", args, 0, 1);
        if (len == 0) {
            coll.dropIndexes();
            return null;
        } else if (len == 1) {
            coll.dropIndexes(caster.toString(args[0]));
            return null;
        }
    }

    // createIndex
    if (methodName.equals("createIndex") || methodName.equals("ensureIndex")) {
        int len = checkArgLength("createIndex", args, 1, 3);
        if (len == 1) {
            DBObject dbo = toDBObject(args[0], null);
            if (dbo != null)
                coll.createIndex(dbo);
            else
                coll.createIndex(caster.toString(args[0]));
            return null;
        }
        if (len == 2) {
            DBObject p1 = toDBObject(args[0]);
            DBObject p2 = toDBObject(args[1], null);
            if (p2 != null)
                coll.createIndex(p1, p2);
            else
                coll.createIndex(p1, caster.toString(args[1]));
            return null;
        } else if (len == 3) {
            coll.createIndex(toDBObject(args[0]), caster.toString(args[1]), caster.toBooleanValue(args[2]));
            return null;
        }
    }

    // getStats
    if (methodName.equals("getStats") || methodName.equals("stats")) {
        checkArgLength("getStats", args, 0, 0);
        return toCFML(coll.getStats());
    }

    // getIndexes
    if (methodName.equals("getIndexes") || methodName.equals("getIndexInfo")) {
        checkArgLength(methodName.getString(), args, 0, 0);
        return toCFML(coll.getIndexInfo());
    }

    // getWriteConcern
    if (methodName.equals("getWriteConcern")) {
        checkArgLength("getWriteConcern", args, 0, 0);
        return toCFML(coll.getWriteConcern());
    }

    // find
    if (methodName.equals("find")) {
        int len = checkArgLength("find", args, 0, 3);
        DBCursor cursor = null;
        if (len == 0) {
            cursor = coll.find();
        } else if (len == 1) {
            cursor = coll.find(toDBObject(args[0]));
        } else if (len == 2) {
            cursor = coll.find(toDBObject(args[0]), toDBObject(args[1]));
        } else if (len == 3) {
            cursor = coll.find(toDBObject(args[0]), toDBObject(args[1])).skip(caster.toIntValue(args[2]));
        }

        return toCFML(cursor);
    }
    // findOne
    else if (methodName.equals("findOne")) {
        int len = checkArgLength("findOne", args, 0, 3);
        DBObject obj = null;
        if (len == 0) {
            obj = coll.findOne();
        } else if (len == 1) {
            DBObject arg1 = toDBObject(args[0], null);
            if (arg1 != null)
                obj = coll.findOne(arg1);
            else
                obj = coll.findOne(args[0]);

        } else if (len == 2) {
            DBObject arg1 = toDBObject(args[0], null);
            if (arg1 != null)
                obj = coll.findOne(arg1, toDBObject(args[1]));
            else
                obj = coll.findOne(args[0], toDBObject(args[1]));
        } else if (len == 3) {
            obj = coll.findOne(toDBObject(args[0]), toDBObject(args[1]), toDBObject(args[2]));
        }
        return toCFML(obj);
    }
    // findAndRemove
    if (methodName.equals("findAndRemove")) {
        checkArgLength("findAndRemove", args, 1, 1);
        DBObject obj = coll.findAndRemove(toDBObject(args[0]));
        return toCFML(obj);
    }
    // findAndModify
    if (methodName.equals("findAndModify")) {
        int len = args == null ? 0 : args.length;
        if (len != 2 && len != 3 && len != 7) {
            throw exp.createApplicationException(
                    "the function findAndModify needs 2, 3 or 7 arguments, but " + len + " were passed");
        }
        DBObject obj = null;
        if (len == 2) {
            obj = coll.findAndModify(toDBObject(args[0]), toDBObject(args[1]));
        } else if (len == 3) {
            obj = coll.findAndModify(toDBObject(args[0]), toDBObject(args[1]), toDBObject(args[2]));
        } else if (len == 7) {
            obj = coll.findAndModify(toDBObject(args[0]), toDBObject(args[1]), toDBObject(args[2]),
                    caster.toBooleanValue(args[3]), toDBObject(args[4]), caster.toBooleanValue(args[5]),
                    caster.toBooleanValue(args[6]));
        }

        return toCFML(obj);
    }

    //group
    /*
       TODO: needs GroupCommand
    if(methodName.equals("group")) {
       int len=checkArgLength("group",args,1,1);
       if(len==1){
    return toCFML(coll.group(
       toDBObject(args[0])
    ));
       }
    }*/

    // insert
    if (methodName.equals("insert")) {
        checkArgLength("insert", args, 1, 1);
        return toCFML(coll.insert(toDBObjectArray(args[0])));
    }

    // insertMany(required array documents, struct options) valid options keys are string "writeconcern", boolean "ordered"
    if (methodName.equals("insertMany")) {
        int len = checkArgLength("insertMany", args, 1, 2);
        BulkWriteOperation bulk = coll.initializeOrderedBulkOperation();
        WriteConcern wc = coll.getWriteConcern();

        if (len == 2) {
            DBObject dboOpts = toDBObject(args[1]);
            if (dboOpts.containsField("ordered")) {
                if (!decision.isBoolean(dboOpts.get("ordered")))
                    throw exp.createApplicationException("ordered in options must be boolean value");

                if (!caster.toBooleanValue(dboOpts.get("ordered"))) {
                    bulk = coll.initializeUnorderedBulkOperation();
                }
            }

            if (dboOpts.containsField("writeconcern")) {
                WriteConcern newWc = WriteConcern.valueOf(caster.toString(dboOpts.get("writeconcern")));
                if (newWc != null) {
                    wc = newWc;
                }
            }
        }
        Map<String, Object> result = new LinkedHashMap<String, Object>();
        BulkWriteResult bulkResult;
        List<Map> writeErrors = new ArrayList<Map>();

        Array arr = caster.toArray(args[0]);
        if (arr.size() == 0) {
            result.put("nInserted", 0);
            result.put("writeErrors", writeErrors);
            result.put("acknowledged", true);
            return toCFML(result);
        }

        Iterator<Object> it = arr.valueIterator();
        while (it.hasNext()) {
            bulk.insert(toDBObject(it.next()));
        }

        try {
            bulkResult = bulk.execute(wc);
        } catch (BulkWriteException e) {
            Map<String, Object> bulkErrorItem;
            BulkWriteError bulkError;

            bulkResult = e.getWriteResult();
            List<BulkWriteError> errors = e.getWriteErrors();

            Iterator<BulkWriteError> jj = errors.iterator();
            while (jj.hasNext()) {
                bulkErrorItem = new LinkedHashMap<String, Object>();
                bulkError = jj.next();
                bulkErrorItem.put("index", (bulkError.getIndex() + 1)); // +1 so we get index of item in CFML array
                bulkErrorItem.put("code", bulkError.getCode());
                bulkErrorItem.put("errmsg", bulkError.getMessage());
                bulkErrorItem.put("op", bulkError.getDetails());
                writeErrors.add(bulkErrorItem);
            }
        }

        result.put("acknowledged", bulkResult.isAcknowledged());
        if (bulkResult.isAcknowledged()) {
            result.put("nInserted", bulkResult.getInsertedCount());
            result.put("writeErrors", writeErrors);
        }

        return toCFML(result);
    }

    // bulkWrite(required array operations, struct options) valid options keys are string "writeconcern", boolean "ordered", boolean "bypassDocumentValidation"
    // an operation is a struct with the following keys: { "operation":[insert|update|updateOne|remove|removeOne], "document":[(required if operation is insert) - a doc to insert], "query":[(optional) - the query to find for remove/update operations], "update":[(required for update/updateOne) - the update document] }
    // i.e. dbCollection.bulkWrite([
    //       {"operation":"insert", "document":{"test":"insert"}}
    //       ,{"operation":"updateOne", "query":{"_id":"foo"}, "update":{"$set":{"updated":true}}}         
    //       ,{"operation":"removeOne", "query":{"_id":"goaway"}}         
    // ],{"ordered":false})
    if (methodName.equals("bulkWrite")) {
        int len = checkArgLength("bulkWrite", args, 1, 2);
        BulkWriteOperation bulk = coll.initializeOrderedBulkOperation();
        WriteConcern wc = coll.getWriteConcern();

        if (len == 2) {
            DBObject dboOpts = toDBObject(args[1]);
            if (dboOpts.containsField("ordered")) {
                if (!decision.isBoolean(dboOpts.get("ordered")))
                    throw exp.createApplicationException("ordered in options must be boolean value");

                if (!caster.toBooleanValue(dboOpts.get("ordered"))) {
                    bulk = coll.initializeUnorderedBulkOperation();
                }
            }

            if (dboOpts.containsField("bypassDocumentValidation")) {
                if (!decision.isBoolean(dboOpts.get("bypassDocumentValidation")))
                    throw exp.createApplicationException(
                            "bypassDocumentValidation in options must be boolean value");

                bulk.setBypassDocumentValidation(
                        caster.toBooleanValue(dboOpts.get("bypassDocumentValidation")));
            }

            if (dboOpts.containsField("writeconcern")) {
                WriteConcern newWc = WriteConcern.valueOf(caster.toString(dboOpts.get("writeconcern")));
                if (newWc != null) {
                    wc = newWc;
                }
            }
        }
        Map<String, Object> result = new LinkedHashMap<String, Object>();
        BulkWriteResult bulkResult;
        List<Map> writeErrors = new ArrayList<Map>();

        Array arr = caster.toArray(args[0]);
        if (arr.size() == 0) {
            result.put("nInserted", 0);
            result.put("nMatched", 0);
            result.put("nModified", 0);
            result.put("nRemoved", 0);
            result.put("writeErrors", writeErrors);
            result.put("acknowledged", true);
            return toCFML(result);
        }

        Iterator<Object> it = arr.valueIterator();
        while (it.hasNext()) {

            DBObject operation = toDBObject(it.next());

            if (operation.get("operation") == "update") {
                // do stuff to add update operation
                bulk.find(toDBObject(operation.get("query"))).update(toDBObject(operation.get("update")));
            } else if (operation.get("operation") == "updateOne") {
                // do stuff to add updateOne operation
                bulk.find(toDBObject(operation.get("query"))).updateOne(toDBObject(operation.get("update")));
            } else if (operation.get("operation") == "remove") {
                // do stuff to add remove operation
                bulk.find(toDBObject(operation.get("query"))).remove();
            } else if (operation.get("operation") == "removeOne") {
                // do stuff to add removeOne operation
                bulk.find(toDBObject(operation.get("query"))).removeOne();
            } else if (operation.get("operation") == "insert") {
                bulk.insert(toDBObject(operation.get("document")));
            }

        }

        try {
            bulkResult = bulk.execute(wc);
        } catch (BulkWriteException e) {
            Map<String, Object> bulkErrorItem;
            BulkWriteError bulkError;

            bulkResult = e.getWriteResult();
            List<BulkWriteError> errors = e.getWriteErrors();

            Iterator<BulkWriteError> jj = errors.iterator();
            while (jj.hasNext()) {
                bulkErrorItem = new LinkedHashMap<String, Object>();
                bulkError = jj.next();
                bulkErrorItem.put("index", (bulkError.getIndex() + 1)); // +1 so we get index of item in CFML array
                bulkErrorItem.put("code", bulkError.getCode());
                bulkErrorItem.put("errmsg", bulkError.getMessage());
                bulkErrorItem.put("op", bulkError.getDetails());
                writeErrors.add(bulkErrorItem);
            }
        }

        result.put("acknowledged", bulkResult.isAcknowledged());
        if (bulkResult.isAcknowledged()) {
            result.put("nInserted", bulkResult.getInsertedCount());
            result.put("nMatched", bulkResult.getMatchedCount());
            result.put("nModified", bulkResult.getModifiedCount());
            result.put("nRemoved", bulkResult.getRemovedCount());
            result.put("writeErrors", writeErrors);
        }

        return toCFML(result);
    }

    //mapReduce
    if (methodName.equals("mapReduce")) {
        int len = checkArgLength("mapReduce", args, 4, 4);
        if (len == 4) {
            return toCFML(coll.mapReduce(caster.toString(args[0]), caster.toString(args[1]),
                    caster.toString(args[2]), toDBObject(args[3])));
        }
    }

    // remove
    if (methodName.equals("remove")) {
        checkArgLength("remove", args, 1, 1);
        return toCFML(coll.remove(toDBObject(args[0])));

    }

    // rename
    if (methodName.equals("rename") || methodName.equals("renameCollection")) {
        int len = checkArgLength(methodName.getString(), args, 1, 2);
        if (len == 1) {
            return toCFML(coll.rename(caster.toString(args[0])));
        } else if (len == 2) {
            return toCFML(coll.rename(caster.toString(args[0]), caster.toBooleanValue(args[1])));
        }
    }

    // save
    if (methodName.equals("save")) {
        checkArgLength("save", args, 1, 1);
        return toCFML(coll.save(toDBObject(args[0])));
    }

    // setWriteConcern
    if (methodName.equals("setWriteConcern")) {
        checkArgLength("setWriteConcern", args, 1, 1);
        WriteConcern wc = WriteConcern.valueOf(caster.toString(args[0]));
        if (wc != null) {
            coll.setWriteConcern(wc);
        }
        return null;
    }

    // storageSize
    if (methodName.equals("storageSize")) {
        checkArgLength("storageSize", args, 0, 0);
        return toCFML(coll.getStats().get("storageSize"));
    }

    // totalIndexSize
    if (methodName.equals("totalIndexSize")) {
        checkArgLength("totalIndexSize", args, 0, 0);
        return toCFML(coll.getStats().get("totalIndexSize"));
    }

    // update
    if (methodName.equals("update")) {
        int len = checkArgLength("update", args, 2, 4);
        if (len == 2) {
            return toCFML(coll.update(toDBObject(args[0]), toDBObject(args[1])));
        } else if (len == 3) {
            return toCFML(coll.update(toDBObject(args[0]), toDBObject(args[1]), caster.toBooleanValue(args[2]),
                    false));
        } else if (len == 4) {
            return toCFML(coll.update(toDBObject(args[0]), toDBObject(args[1]), caster.toBooleanValue(args[2]),
                    caster.toBooleanValue(args[3])));
        }
    }

    String functionNames = "aggregate,count,dataSize,distinct,drop,dropIndex,dropIndexes,createIndex,stats,getIndexes,getWriteConcern,find,findOne,findAndRemove,findAndModify,"
            + "group,insert,insertMany,bulkWrite,mapReduce,remove,rename,save,setWriteConcern,storageSize,totalIndexSize,update";

    throw exp.createApplicationException(
            "function " + methodName + " does not exist; existing functions are [" + functionNames + "]");
}

From source file: org.sglover.entities.dao.mongo.MongoEntitiesDAO.java

License: Open Source License

private void addEntities(Node node, String type, String key, Collection<Entity<String>> entities) {
    BulkWriteOperation bulk = entitiesData.initializeUnorderedBulkOperation();

    String nodeId = node.getNodeId();
    long nodeInternalId = node.getNodeInternalId();
    String nodeVersion = node.getVersionLabel();

    if (entities.size() > 0) {
        int expected = entities.size();
        for (Entity<String> nameEntity : entities) {
            List<EntityLocation> locations = nameEntity.getLocations();
            List<DBObject> locs = new LinkedList<>();
            for (EntityLocation location : locations) {
                long start = location.getBeginOffset();
                long end = location.getEndOffset();
                String context = location.getContext();
                double probability = location.getProbability();

                DBObject locDBObject = BasicDBObjectBuilder.start("s", start).add("e", end)
                        .add("p", probability).add("c", context).get();
                locs.add(locDBObject);
            }

            DBObject dbObject = BasicDBObjectBuilder.start("n", nodeId).add("ni", nodeInternalId)
                    .add("v", nodeVersion).add("t", type).add(key, nameEntity.getEntity())
                    .add("c", nameEntity.getCount()).add("locs", locs).get();
            bulk.insert(dbObject);
        }

        BulkWriteResult result = bulk.execute();
        int inserted = result.getInsertedCount();

        if (expected != inserted) {
            throw new RuntimeException("Mongo write failed");
        }
    }
}