Example usage for com.mongodb BulkWriteOperation find

Introduction

This page presents example usage of com.mongodb BulkWriteOperation.find, collected from open-source projects.

Prototype

public BulkWriteRequestBuilder find(final DBObject query) 

Document

Start building a write request to add to the bulk write operation.
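
To make the builder flow concrete, here is a minimal sketch using the legacy driver API that all of the examples below rely on (the collection and field names are illustrative): find(query) returns a BulkWriteRequestBuilder, the chained call adds the request to the batch, and nothing is sent to the server until execute().

MongoClient mongoClient = new MongoClient(); // assumes a mongod on localhost:27017
DBCollection coll = mongoClient.getDB("mydb").getCollection("people");

BulkWriteOperation bulk = coll.initializeOrderedBulkOperation();
// find(query) starts a write request; chaining an operation adds it to the batch
bulk.find(new BasicDBObject("status", "inactive")).remove();
bulk.find(new BasicDBObject("_id", 42)).upsert()
        .updateOne(new BasicDBObject("$set", new BasicDBObject("status", "active")));

BulkWriteResult result = bulk.execute();
System.out.println("removed=" + result.getRemovedCount());
mongoClient.close();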

Usage

From source file: com.redhat.lightblue.mongo.crud.BasicDocDeleter.java

License: Open Source License

@Override
public void delete(CRUDOperationContext ctx, DBCollection collection, DBObject mongoQuery,
        CRUDDeleteResponse response) {
    LOGGER.debug("Removing docs with {}", mongoQuery);

    int numDeleted = 0;

    if (!hookOptimization || ctx.getHookManager().hasHooks(ctx, CRUDOperation.DELETE)) {
        LOGGER.debug("There are hooks, retrieve-delete");
        try (DBCursor cursor = collection.find(mongoQuery, null)) {
            // Set read preference to primary for read-for-update operations
            cursor.setReadPreference(ReadPreference.primary());

            // All docs, to be put into the context
            ArrayList<DocCtx> contextDocs = new ArrayList<>();
            // ids to delete from the db
            List<Object> idsToDelete = new ArrayList<>(batchSize);
            while (cursor.hasNext()) {

                // We will use this index to access the documents deleted in this batch
                int thisBatchIndex = contextDocs.size();
                if (idsToDelete.size() < batchSize) {
                    // build batch
                    DBObject doc = cursor.next();
                    DocTranslator.TranslatedDoc tdoc = translator.toJson(doc);
                    DocCtx docCtx = new DocCtx(tdoc.doc, tdoc.rmd);
                    docCtx.setOriginalDocument(docCtx);
                    docCtx.setCRUDOperationPerformed(CRUDOperation.DELETE);
                    contextDocs.add(docCtx);
                    idsToDelete.add(doc.get(MongoCRUDController.ID_STR));
                }

                if (idsToDelete.size() == batchSize || !cursor.hasNext()) {
                    // batch built or run out of documents                        
                    BulkWriteOperation bw = collection.initializeUnorderedBulkOperation();

                    for (Object id : idsToDelete) {
                        // doing a bulk of single operations instead of removing by initial query
                        // that way we know which documents were not removed
                        bw.find(new BasicDBObject("_id", id)).remove();
                    }

                    BulkWriteResult result = null;
                    try {
                        if (writeConcern == null) {
                            LOGGER.debug("Bulk deleting docs");
                            result = bw.execute();
                        } else {
                            LOGGER.debug("Bulk deleting docs with writeConcern={} from execution",
                                    writeConcern);
                            result = bw.execute(writeConcern);
                        }
                        LOGGER.debug("Bulk deleted docs - attempted {}, deleted {}", idsToDelete.size(),
                                result.getRemovedCount());
                    } catch (BulkWriteException bwe) {
                        LOGGER.error("Bulk write exception", bwe);
                        handleBulkWriteError(bwe.getWriteErrors(),
                                contextDocs.subList(thisBatchIndex, contextDocs.size()));
                        result = bwe.getWriteResult();
                    } catch (RuntimeException e) {
                        LOGGER.error("Exception", e);
                        throw e;
                    } finally {
                        // result is null if a non-bulk RuntimeException was rethrown above
                        if (result != null) {
                            numDeleted += result.getRemovedCount();
                        }
                        // clear list before processing next batch
                        idsToDelete.clear();
                    }
                }
            }
            ctx.setDocumentStream(new ListDocumentStream<DocCtx>(contextDocs));
        }
    } else {
        LOGGER.debug("There are no hooks, deleting in bulk");
        try {
            if (writeConcern == null) {
                numDeleted = collection.remove(mongoQuery).getN();
            } else {
                numDeleted = collection.remove(mongoQuery, writeConcern).getN();
            }
        } catch (MongoException e) {
            LOGGER.error("Deletion error", e);
            throw e;
        }
        ctx.setDocumentStream(new ListDocumentStream<DocCtx>(new ArrayList<DocCtx>()));
    }

    response.setNumDeleted(numDeleted);
}

From source file: com.redhat.lightblue.mongo.crud.MongoSafeUpdateProtocol.java

License: Open Source License

private List<Integer> retryFailedDocs(List<Integer> failedDocs, CommitInfo ci) {
    List<Integer> newFailedDocs = new ArrayList<>(failedDocs.size());
    for (Integer index : failedDocs) {
        BatchDoc doc = batch.get(index);
        // Read the doc
        DBObject findQuery = new BasicDBObject("_id", doc.id);
        if (cfg.isReevaluateQueryForRetry()) {
            if (query != null) {
                List<DBObject> list = new ArrayList<>(2);
                list.add(findQuery);
                list.add(query);
                findQuery = new BasicDBObject("$and", list);
            }
        }
        DBObject updatedDoc = collection.findOne(findQuery);
        if (updatedDoc != null) {
            // if updatedDoc is null, doc is lost. Error remains
            DBObject newDoc = reapplyChanges(index, updatedDoc);
            // Make sure reapplyChanges does not insert references
            // of objects from the old document into the
            // updatedDoc. That updates both copies of
            // documents. Use deepCopy
            if (newDoc != null) {
                DBObject replaceQuery = writeReplaceQuery(updatedDoc);
                // Update the doc ver to our doc ver. This doc is here
                // because its docVer is not set to our docver, so
                // this is ok
                DocVerUtil.setDocVer(newDoc, docVer);
                // Using bulkwrite here with one doc to use the
                // findAndReplace API, which is lacking in
                // DBCollection
                BulkWriteOperation nestedBwo = collection.initializeUnorderedBulkOperation();
                nestedBwo.find(replaceQuery).replaceOne(newDoc);
                try {
                    if (nestedBwo.execute().getMatchedCount() == 1) {
                        if (LOGGER.isDebugEnabled()) {
                            LOGGER.debug("Successfully retried to update a doc: replaceQuery={} newDoc={}",
                                    replaceQuery, newDoc);
                        }
                        // Successful update
                        ci.errors.remove(index);
                    }
                } catch (Exception e) {
                    if (LOGGER.isDebugEnabled()) {
                        LOGGER.debug("Failed retrying to update a doc: replaceQuery={} newDoc={} error={}",
                                replaceQuery, newDoc, e.toString());
                    }
                    newFailedDocs.add(index);
                }
            } else {
                // reapplyChanges removed the doc from the result set
                ci.errors.remove(index);
            }
        } else {
            // Doc no longer exists
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Removing doc id={} from retry queue, because it does not exist or match anymore",
                        index);
            }
            ci.errors.remove(index);
            ci.lostDocs.add(index);
        }
    }
    return newFailedDocs;
}

From source file: com.zjy.mongo.output.MongoOutputCommitter.java

License: Apache License

@Override
public void commitTask(final TaskAttemptContext taskContext) throws IOException {
    LOG.info("Committing task.");

    collections = MongoConfigUtil.getOutputCollections(taskContext.getConfiguration());
    numberOfHosts = collections.size();

    // Get temporary file.
    Path tempFilePath = getTaskAttemptPath(taskContext);
    LOG.info("Committing from temporary file: " + tempFilePath.toString());
    long filePos = 0, fileLen;
    FSDataInputStream inputStream = null;
    try {
        FileSystem fs = FileSystem.get(taskContext.getConfiguration());
        inputStream = fs.open(tempFilePath);
        fileLen = fs.getFileStatus(tempFilePath).getLen();
    } catch (IOException e) {
        LOG.error("Could not open temporary file for committing", e);
        cleanupAfterCommit(inputStream, taskContext);
        throw e;
    }

    int maxDocs = MongoConfigUtil.getBatchSize(taskContext.getConfiguration());
    int curBatchSize = 0;
    DBCollection coll = getDbCollectionByRoundRobin();
    BulkWriteOperation bulkOp = coll.initializeOrderedBulkOperation();

    // Read Writables out of the temporary file.
    BSONWritable bw = new BSONWritable();
    MongoUpdateWritable muw = new MongoUpdateWritable();
    while (filePos < fileLen) {
        try {
            // Determine writable type, and perform corresponding operation
            // on MongoDB.
            int mwType = inputStream.readInt();
            if (MongoWritableTypes.BSON_WRITABLE == mwType) {
                bw.readFields(inputStream);
                bulkOp.insert(new BasicDBObject(bw.getDoc().toMap()));
            } else if (MongoWritableTypes.MONGO_UPDATE_WRITABLE == mwType) {
                muw.readFields(inputStream);
                DBObject query = new BasicDBObject(muw.getQuery().toMap());
                DBObject modifiers = new BasicDBObject(muw.getModifiers().toMap());
                if (muw.isMultiUpdate()) {
                    if (muw.isUpsert()) {
                        bulkOp.find(query).upsert().update(modifiers);
                    } else {
                        bulkOp.find(query).update(modifiers);
                    }
                } else {
                    if (muw.isUpsert()) {
                        bulkOp.find(query).upsert().updateOne(modifiers);
                    } else {
                        bulkOp.find(query).updateOne(modifiers);
                    }
                }
            } else {
                throw new IOException("Unrecognized type: " + mwType);
            }
            filePos = inputStream.getPos();
            // Write to MongoDB if the batch is full, or if this is the last
            // operation to be performed for the Task.
            if (++curBatchSize >= maxDocs || filePos >= fileLen) {
                try {
                    bulkOp.execute();
                } catch (MongoException e) {
                    LOG.error("Could not write to MongoDB", e);
                    throw e;
                }
                coll = getDbCollectionByRoundRobin();
                bulkOp = coll.initializeOrderedBulkOperation();
                curBatchSize = 0;

                // Signal progress back to Hadoop framework so that we
                // don't time out.
                taskContext.progress();
            }
        } catch (IOException e) {
            LOG.error("Error reading from temporary file", e);
            throw e;
        }
    }

    cleanupAfterCommit(inputStream, taskContext);
}

From source file: edu.csulaerp.db.ReferenceMongo.java

License: Apache License

/**
 * Run this main method to see the output of this quick example.
 *
 * @param args takes no args
 * @throws UnknownHostException if it cannot connect to a MongoDB instance at localhost:27017
 */
public static void main(final String[] args) throws UnknownHostException {
    // connect to the local database server
    MongoClient mongoClient = new MongoClient();

    /*
    // Authenticate - optional
    MongoCredential credential = MongoCredential.createMongoCRCredential(userName, database, password);
    MongoClient mongoClient = new MongoClient(new ServerAddress(), Arrays.asList(credential));
    */

    // get handle to "mydb"
    DB db = mongoClient.getDB("mydb");

    // get a list of the collections in this database and print them out
    Set<String> collectionNames = db.getCollectionNames();
    for (final String s : collectionNames) {
        System.out.println(s);
    }

    // get a collection object to work with
    DBCollection coll = db.getCollection("testCollection");

    // drop all the data in it
    coll.drop();

    // make a document and insert it
    BasicDBObject doc = new BasicDBObject("name", "MongoDB").append("type", "database").append("count", 1)
            .append("info", new BasicDBObject("x", 203).append("y", 102));

    coll.insert(doc);

    // get it back (it's the only document in the collection, since we dropped the rest earlier)
    DBObject myDoc = coll.findOne();
    System.out.println(myDoc);

    // now, let's add lots of little documents to the collection so we can explore queries and cursors
    for (int i = 0; i < 100; i++) {
        coll.insert(new BasicDBObject().append("i", i));
    }
    System.out
            .println("total # of documents after inserting 100 small ones (should be 101) " + coll.getCount());

    // let's get all the documents in the collection and print them out
    DBCursor cursor = coll.find();
    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close();
    }

    // now use a query to get 1 document out
    BasicDBObject query = new BasicDBObject("i", 71);
    cursor = coll.find(query);

    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close();
    }

    // $ Operators are represented as strings
    query = new BasicDBObject("j", new BasicDBObject("$ne", 3)).append("k", new BasicDBObject("$gt", 10));

    cursor = coll.find(query);

    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close();
    }

    // now use a range query to get a larger subset
    // find all where i > 50
    query = new BasicDBObject("i", new BasicDBObject("$gt", 50));
    cursor = coll.find(query);

    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close();
    }

    // range query with multiple constraints
    query = new BasicDBObject("i", new BasicDBObject("$gt", 20).append("$lte", 30));
    cursor = coll.find(query);

    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close();
    }

    // count all documents in the collection, but take at most one second to do so
    coll.find().maxTime(1, SECONDS).count();

    // Bulk operations
    BulkWriteOperation builder = coll.initializeOrderedBulkOperation();
    builder.insert(new BasicDBObject("_id", 1));
    builder.insert(new BasicDBObject("_id", 2));
    builder.insert(new BasicDBObject("_id", 3));

    builder.find(new BasicDBObject("_id", 1)).updateOne(new BasicDBObject("$set", new BasicDBObject("x", 2)));
    builder.find(new BasicDBObject("_id", 2)).removeOne();
    builder.find(new BasicDBObject("_id", 3)).replaceOne(new BasicDBObject("_id", 3).append("x", 4));

    BulkWriteResult result = builder.execute();
    System.out.println("Ordered bulk write result : " + result);

    // Unordered bulk operation - no guarantee of the order of operations
    builder = coll.initializeUnorderedBulkOperation();
    builder.find(new BasicDBObject("_id", 1)).removeOne();
    builder.find(new BasicDBObject("_id", 2)).removeOne();

    result = builder.execute();
    System.out.println("Ordered bulk write result : " + result);

    // parallelScan
    ParallelScanOptions parallelScanOptions = ParallelScanOptions.builder().numCursors(3).batchSize(300)
            .build();

    List<Cursor> cursors = coll.parallelScan(parallelScanOptions);
    for (Cursor pCursor : cursors) {
        while (pCursor.hasNext()) {
            System.out.println(pCursor.next());
        }
    }

    // release resources
    db.dropDatabase();
    mongoClient.close();
}

From source file: edu.umass.cs.gnsserver.database.MongoRecords.java

License: Apache License

/**
 *
 * @param collectionName
 * @param values
 * @throws FailedDBOperationException
 * @throws RecordExistsException
 */
public void bulkUpdate(String collectionName, Map<String, JSONObject> values)
        throws FailedDBOperationException, RecordExistsException {
    //String primaryKey = mongoCollectionSpecs.getCollectionSpec(collectionName).getPrimaryKey().getName();
    DBCollection collection = db.getCollection(collectionName);
    String primaryKey = mongoCollectionSpecs.getCollectionSpec(collectionName).getPrimaryKey().getName();
    db.requestEnsureConnection();
    BulkWriteOperation unordered = collection.initializeUnorderedBulkOperation();
    for (Map.Entry<String, JSONObject> entry : values.entrySet()) {
        BasicDBObject query = new BasicDBObject(primaryKey, entry.getKey());
        JSONObject value = entry.getValue();
        if (value != null) {
            DBObject document;
            try {
                document = (DBObject) JSON.parse(value.toString());
            } catch (Exception e) {
                throw new FailedDBOperationException(collectionName, "bulkUpdate",
                        "Unable to parse json" + e.getMessage());
            }
            unordered.find(query).upsert().replaceOne(document);
        } else {
            unordered.find(query).removeOne();
        }
    }
    // Maybe check the result?
    unordered.execute();
}
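
The "Maybe check the result?" comment above leaves the outcome unchecked. A hedged sketch of what such a check could look like with the legacy driver's BulkWriteResult (this is not part of the original source):

    BulkWriteResult result = unordered.execute();
    if (result.isAcknowledged()) {
        // replacements show up as matched or upserted; deletions as removed
        int applied = result.getMatchedCount() + result.getUpserts().size() + result.getRemovedCount();
        if (applied < values.size()) {
            // some entries were not applied; surface this to the caller
            throw new FailedDBOperationException(collectionName, "bulkUpdate",
                    "applied " + applied + " of " + values.size() + " entries");
        }
    }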

From source file: io.hipstogram.trident.mongodb.operation.Update.java

License: Apache License

@Override
public void addToBulkOperation(BulkWriteOperation bulk) {
    bulk.find(query).update(statement);
}

From source file: io.hipstogram.trident.mongodb.operation.Upsert.java

License: Apache License

@Override
public void addToBulkOperation(BulkWriteOperation bulk) {
    bulk.find(query).upsert().update(statement);
}
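
These two classes follow a small command pattern: each operation knows how to append itself to a shared BulkWriteOperation. A hypothetical caller could batch mixed operations as sketched below; the Operation interface name and the (query, statement) constructors are assumed here, as they are not shown in the source:

// hypothetical driver loop; constructor signatures are assumed
List<Operation> ops = Arrays.asList(
        new Update(new BasicDBObject("_id", 1), new BasicDBObject("$inc", new BasicDBObject("count", 1))),
        new Upsert(new BasicDBObject("_id", 2), new BasicDBObject("$set", new BasicDBObject("seen", true))));

BulkWriteOperation bulk = collection.initializeUnorderedBulkOperation();
for (Operation op : ops) {
    op.addToBulkOperation(bulk); // each operation appends its own request
}
bulk.execute();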

From source file: org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore.java

License: Apache License

private <T extends Document> BulkUpdateResult sendBulkUpdate(Collection<T> collection,
        java.util.Collection<UpdateOp> updateOps, Map<String, T> oldDocs) {
    DBCollection dbCollection = getDBCollection(collection);
    BulkWriteOperation bulk = dbCollection.initializeUnorderedBulkOperation();
    String[] bulkIds = new String[updateOps.size()];
    int i = 0;
    for (UpdateOp updateOp : updateOps) {
        String id = updateOp.getId();
        QueryBuilder query = createQueryForUpdate(id, updateOp.getConditions());
        T oldDoc = oldDocs.get(id);
        DBObject update;
        if (oldDoc == null) {
            query.and(Document.MOD_COUNT).exists(false);
            update = createUpdate(updateOp, true);
        } else {
            query.and(Document.MOD_COUNT).is(oldDoc.getModCount());
            update = createUpdate(updateOp, false);
        }
        bulk.find(query.get()).upsert().updateOne(update);
        bulkIds[i++] = id;
    }

    BulkWriteResult bulkResult;
    Set<String> failedUpdates = new HashSet<String>();
    Set<String> upserts = new HashSet<String>();
    try {
        bulkResult = bulk.execute();
    } catch (BulkWriteException e) {
        bulkResult = e.getWriteResult();
        for (BulkWriteError err : e.getWriteErrors()) {
            failedUpdates.add(bulkIds[err.getIndex()]);
        }
    }
    for (BulkWriteUpsert upsert : bulkResult.getUpserts()) {
        upserts.add(bulkIds[upsert.getIndex()]);
    }
    return new BulkUpdateResult(failedUpdates, upserts);
}

From source file: org.fastmongo.odm.repository.MongoTemplate.java

License: Apache License

@Override
public BulkWriteResult save(Class<?> collectionClass, Collection<DBObject> dbObjects) {
    BulkWriteOperation bulk = getCollection(collectionClass).initializeUnorderedBulkOperation();

    for (DBObject dbObject : dbObjects) {
        DBObject idQuery = new BasicDBObject();
        idQuery.put(OBJECT_ID_KEY, dbObject.get(OBJECT_ID_KEY));
        bulk.find(idQuery).upsert().replaceOne(dbObject);
    }

    return bulk.execute();
}

From source file: org.lucee.mongodb.DBCollectionImpl.java

License: Open Source License

@Override
public Object call(PageContext pc, Key methodName, Object[] args) throws PageException {

    // aggregate
    if (methodName.equals("aggregate")) {
        boolean hasOptions = false;
        AggregationOptions options = null;
        int len = checkArgLength("aggregate", args, 1, -1); // no length limitation
        List<DBObject> pipeline = new ArrayList<DBObject>();
        // Pipeline array as single argument
        if (len == 1 && decision.isArray(args[0])) {
            Array arr = caster.toArray(args[0]);
            if (arr.size() == 0)
                throw exp.createApplicationException(
                        "the array passed to the function aggregate needs at least 1 element");

            Iterator<Object> it = arr.valueIterator();
            while (it.hasNext()) {
                pipeline.add(toDBObject(it.next()));
            }
        } else {
            // First argument is pipeline of operations, second argument is struct of options --> returns cursor!
            if (len == 2 && decision.isArray(args[0]) && decision.isStruct(args[1])) {
                Array arr = caster.toArray(args[0]);
                Iterator<Object> it = arr.valueIterator();
                while (it.hasNext()) {
                    pipeline.add(toDBObject(it.next()));
                }

                hasOptions = true;
                // options builder
                AggregationOptions.Builder optbuilder = AggregationOptions.builder()
                        .outputMode(AggregationOptions.OutputMode.CURSOR);

                DBObject dboOpts = toDBObject(args[1]);
                if (dboOpts.containsField("allowDiskUse")) {
                    if (!decision.isBoolean(dboOpts.get("allowDiskUse")))
                        throw exp.createApplicationException("allowDiskUse in options must be boolean value");

                    optbuilder = optbuilder.allowDiskUse(caster.toBooleanValue(dboOpts.get("allowDiskUse")));
                }
                if (dboOpts.containsField("cursor")) {
                    if (!decision.isStruct(dboOpts.get("cursor")))
                        throw exp.createApplicationException(
                                "cursor in options must be struct with optional key batchSize");

                    DBObject cursoropts = toDBObject(dboOpts.get("cursor"));
                    if (cursoropts.containsField("batchSize")) {
                        if (!decision.isNumeric(cursoropts.get("batchSize")))
                            throw exp.createApplicationException("cursor.batchSize in options must be integer");

                        optbuilder = optbuilder.batchSize(caster.toIntValue(cursoropts.get("batchSize")));
                    }
                }

                options = optbuilder.build();
            }
            // First argument is first operation, second argument is array of additional operations
            else if (len == 2 && decision.isArray(args[1])) {
                Array arr = caster.toArray(args[1]);
                pipeline.add(toDBObject(args[0]));
                Iterator<Object> it = arr.valueIterator();
                while (it.hasNext()) {
                    pipeline.add(toDBObject(it.next()));
                }
            }
            // N arguments of pipeline operations
            else {
                for (int i = 0; i < len; i++) {
                    pipeline.add(toDBObject(args[i]));
                }
            }
        }

        if (hasOptions) {
            // returns Cursor - requires >= MongoDB 2.6
            return toCFML(coll.aggregate(pipeline, options));
        } else {
            // returns AggregationOutput
            return toCFML(coll.aggregate(pipeline));
        }
    }
    // count
    if (methodName.equals("count")) {
        int len = checkArgLength("count", args, 0, 1);
        if (len == 0) {
            return toCFML(coll.count());
        } else if (len == 1) {
            return toCFML(coll.count(toDBObject(args[0])));
        }
    }
    // dataSize
    if (methodName.equals("dataSize")) {
        checkArgLength("dataSize", args, 0, 0);
        return toCFML(coll.getStats().get("size"));
    }

    // distinct
    if (methodName.equals("distinct")) {
        int len = checkArgLength("distinct", args, 1, 2);
        if (len == 1) {
            return toCFML(coll.distinct(caster.toString(args[0])));
        } else if (len == 2) {
            return toCFML(coll.distinct(caster.toString(args[0]), toDBObject(args[1])));
        }
    }
    // drop
    if (methodName.equals("drop")) {
        checkArgLength("drop", args, 0, 0);
        coll.drop();
        return null;
    }

    // dropIndex
    if (methodName.equals("dropIndex")) {
        checkArgLength("dropIndex", args, 1, 1);
        DBObject dbo = toDBObject(args[0], null);
        if (dbo != null)
            coll.dropIndex(dbo);
        else
            coll.dropIndex(caster.toString(args[0]));

        return null;
    }
    // dropIndexes
    if (methodName.equals("dropIndexes")) {
        int len = checkArgLength("dropIndexes", args, 0, 1);
        if (len == 0) {
            coll.dropIndexes();
            return null;
        } else if (len == 1) {
            coll.dropIndexes(caster.toString(args[0]));
            return null;
        }
    }

    // createIndex
    if (methodName.equals("createIndex") || methodName.equals("ensureIndex")) {
        int len = checkArgLength("createIndex", args, 1, 3);
        if (len == 1) {
            DBObject dbo = toDBObject(args[0], null);
            if (dbo != null)
                coll.createIndex(dbo);
            else
                coll.createIndex(caster.toString(args[0]));
            return null;
        }
        if (len == 2) {
            DBObject p1 = toDBObject(args[0]);
            DBObject p2 = toDBObject(args[1], null);
            if (p2 != null)
                coll.createIndex(p1, p2);
            else
                coll.createIndex(p1, caster.toString(args[1]));
            return null;
        } else if (len == 3) {
            coll.createIndex(toDBObject(args[0]), caster.toString(args[1]), caster.toBooleanValue(args[2]));
            return null;
        }
    }

    // getStats
    if (methodName.equals("getStats") || methodName.equals("stats")) {
        checkArgLength("getStats", args, 0, 0);
        return toCFML(coll.getStats());
    }

    // getIndexes
    if (methodName.equals("getIndexes") || methodName.equals("getIndexInfo")) {
        checkArgLength(methodName.getString(), args, 0, 0);
        return toCFML(coll.getIndexInfo());
    }

    // getWriteConcern
    if (methodName.equals("getWriteConcern")) {
        checkArgLength("getWriteConcern", args, 0, 0);
        return toCFML(coll.getWriteConcern());
    }

    // find
    if (methodName.equals("find")) {
        int len = checkArgLength("find", args, 0, 3);
        DBCursor cursor = null;
        if (len == 0) {
            cursor = coll.find();
        } else if (len == 1) {
            cursor = coll.find(toDBObject(args[0]));
        } else if (len == 2) {
            cursor = coll.find(toDBObject(args[0]), toDBObject(args[1]));
        } else if (len == 3) {
            cursor = coll.find(toDBObject(args[0]), toDBObject(args[1])).skip(caster.toIntValue(args[2]));
        }

        return toCFML(cursor);
    }
    // findOne
    else if (methodName.equals("findOne")) {
        int len = checkArgLength("findOne", args, 0, 3);
        DBObject obj = null;
        if (len == 0) {
            obj = coll.findOne();
        } else if (len == 1) {
            DBObject arg1 = toDBObject(args[0], null);
            if (arg1 != null)
                obj = coll.findOne(arg1);
            else
                obj = coll.findOne(args[0]);

        } else if (len == 2) {
            DBObject arg1 = toDBObject(args[0], null);
            if (arg1 != null)
                obj = coll.findOne(arg1, toDBObject(args[1]));
            else
                obj = coll.findOne(args[0], toDBObject(args[1]));
        } else if (len == 3) {
            obj = coll.findOne(toDBObject(args[0]), toDBObject(args[1]), toDBObject(args[2]));
        }
        return toCFML(obj);
    }
    // findAndRemove
    if (methodName.equals("findAndRemove")) {
        checkArgLength("findAndRemove", args, 1, 1);
        DBObject obj = coll.findAndRemove(toDBObject(args[0]));
        return toCFML(obj);
    }
    // findAndModify
    if (methodName.equals("findAndModify")) {
        int len = args == null ? 0 : args.length;
        if (len != 2 && len != 3 && len != 7) {
            throw exp.createApplicationException(
                    "the function findAndModify needs 2, 3 or 7 arguments, but you have defined only " + len);
        }
        DBObject obj = null;
        if (len == 2) {
            obj = coll.findAndModify(toDBObject(args[0]), toDBObject(args[1]));
        } else if (len == 3) {
            obj = coll.findAndModify(toDBObject(args[0]), toDBObject(args[1]), toDBObject(args[2]));
        } else if (len == 7) {
            obj = coll.findAndModify(toDBObject(args[0]), toDBObject(args[1]), toDBObject(args[2]),
                    caster.toBooleanValue(args[3]), toDBObject(args[4]), caster.toBooleanValue(args[5]),
                    caster.toBooleanValue(args[6]));
        }

        return toCFML(obj);
    }

    // group
    /*
       TODO: needs GroupCommand
       if (methodName.equals("group")) {
           int len = checkArgLength("group", args, 1, 1);
           if (len == 1) {
               return toCFML(coll.group(toDBObject(args[0])));
           }
       }
    */

    // insert
    if (methodName.equals("insert")) {
        checkArgLength("insert", args, 1, 1);
        return toCFML(coll.insert(toDBObjectArray(args[0])));
    }

    // insertMany(required array documents, struct options) valid options keys are string "writeconcern", boolean "ordered"
    if (methodName.equals("insertMany")) {
        int len = checkArgLength("insertMany", args, 1, 2);
        BulkWriteOperation bulk = coll.initializeOrderedBulkOperation();
        WriteConcern wc = coll.getWriteConcern();

        if (len == 2) {
            DBObject dboOpts = toDBObject(args[1]);
            if (dboOpts.containsField("ordered")) {
                if (!decision.isBoolean(dboOpts.get("ordered")))
                    throw exp.createApplicationException("ordered in options must be boolean value");

                if (!caster.toBooleanValue(dboOpts.get("ordered"))) {
                    bulk = coll.initializeUnorderedBulkOperation();
                }
            }

            if (dboOpts.containsField("writeconcern")) {
                WriteConcern newWc = WriteConcern.valueOf(caster.toString(dboOpts.get("writeconcern")));
                if (newWc != null) {
                    wc = newWc;
                }
            }
        }
        Map<String, Object> result = new LinkedHashMap<String, Object>();
        BulkWriteResult bulkResult;
        List<Map> writeErrors = new ArrayList<Map>();

        Array arr = caster.toArray(args[0]);
        if (arr.size() == 0) {
            result.put("nInserted", 0);
            result.put("writeErrors", writeErrors);
            result.put("acknowledged", true);
            return toCFML(result);
        }

        Iterator<Object> it = arr.valueIterator();
        while (it.hasNext()) {
            bulk.insert(toDBObject(it.next()));
        }

        try {
            bulkResult = bulk.execute(wc);
        } catch (BulkWriteException e) {
            Map<String, Object> bulkErrorItem;
            BulkWriteError bulkError;

            bulkResult = e.getWriteResult();
            List<BulkWriteError> errors = e.getWriteErrors();

            Iterator<BulkWriteError> jj = errors.iterator();
            while (jj.hasNext()) {
                bulkErrorItem = new LinkedHashMap<String, Object>();
                bulkError = jj.next();
                bulkErrorItem.put("index", (bulkError.getIndex() + 1)); // +1 so we get index of item in CFML array
                bulkErrorItem.put("code", bulkError.getCode());
                bulkErrorItem.put("errmsg", bulkError.getMessage());
                bulkErrorItem.put("op", bulkError.getDetails());
                writeErrors.add(bulkErrorItem);
            }
        }

        result.put("acknowledged", bulkResult.isAcknowledged());
        if (bulkResult.isAcknowledged()) {
            result.put("nInserted", bulkResult.getInsertedCount());
            result.put("writeErrors", writeErrors);
        }

        return toCFML(result);
    }

    // bulkWrite(required array operations, struct options) valid options keys are string "writeconcern", boolean "ordered", boolean "bypassDocumentValidation"
    // an operation is a struct with the following keys: { "operation":[insert|update|updateOne|remove|removeOne], "document":[(required if operation is insert) - a doc to insert], "query":[(optional) - the query to find for remove/update operations], "update":[(required for update/updateOne) - the update document] }
    // i.e. dbCollection.bulkWrite([
    //       {"operation":"insert", "document":{"test":"insert"}}
    //       ,{"operation":"updateOne", "query":{"_id":"foo"}, "update":{"$set":{"updated":true}}}         
    //       ,{"operation":"removeOne", "query":{"_id":"goaway"}}         
    // ],{"ordered":false})
    if (methodName.equals("bulkWrite")) {
        int len = checkArgLength("bulkWrite", args, 1, 2);
        BulkWriteOperation bulk = coll.initializeOrderedBulkOperation();
        WriteConcern wc = coll.getWriteConcern();

        if (len == 2) {
            DBObject dboOpts = toDBObject(args[1]);
            if (dboOpts.containsField("ordered")) {
                if (!decision.isBoolean(dboOpts.get("ordered")))
                    throw exp.createApplicationException("ordered in options must be boolean value");

                if (!caster.toBooleanValue(dboOpts.get("ordered"))) {
                    bulk = coll.initializeUnorderedBulkOperation();
                }
            }

            if (dboOpts.containsField("bypassDocumentValidation")) {
                if (!decision.isBoolean(dboOpts.get("bypassDocumentValidation")))
                    throw exp.createApplicationException(
                            "bypassDocumentValidation in options must be boolean value");

                bulk.setBypassDocumentValidation(
                        caster.toBooleanValue(dboOpts.get("bypassDocumentValidation")));
            }

            if (dboOpts.containsField("writeconcern")) {
                WriteConcern newWc = WriteConcern.valueOf(caster.toString(dboOpts.get("writeconcern")));
                if (newWc != null) {
                    wc = newWc;
                }
            }
        }
        Map<String, Object> result = new LinkedHashMap<String, Object>();
        BulkWriteResult bulkResult;
        List<Map> writeErrors = new ArrayList<Map>();

        Array arr = caster.toArray(args[0]);
        if (arr.size() == 0) {
            result.put("nInserted", 0);
            result.put("nMatched", 0);
            result.put("nModified", 0);
            result.put("nRemoved", 0);
            result.put("writeErrors", writeErrors);
            result.put("acknowledged", true);
            return toCFML(result);
        }

        Iterator<Object> it = arr.valueIterator();
        while (it.hasNext()) {

            DBObject operation = toDBObject(it.next());
            Object opName = operation.get("operation");

            if ("update".equals(opName)) {
                // update all documents matching the query
                bulk.find(toDBObject(operation.get("query"))).update(toDBObject(operation.get("update")));
            } else if ("updateOne".equals(opName)) {
                // update the first document matching the query
                bulk.find(toDBObject(operation.get("query"))).updateOne(toDBObject(operation.get("update")));
            } else if ("remove".equals(opName)) {
                // remove all documents matching the query
                bulk.find(toDBObject(operation.get("query"))).remove();
            } else if ("removeOne".equals(opName)) {
                // remove the first document matching the query
                bulk.find(toDBObject(operation.get("query"))).removeOne();
            } else if ("insert".equals(opName)) {
                bulk.insert(toDBObject(operation.get("document")));
            }

        }

        try {
            bulkResult = bulk.execute(wc);
        } catch (BulkWriteException e) {
            Map<String, Object> bulkErrorItem;
            BulkWriteError bulkError;

            bulkResult = e.getWriteResult();
            List<BulkWriteError> errors = e.getWriteErrors();

            Iterator<BulkWriteError> jj = errors.iterator();
            while (jj.hasNext()) {
                bulkErrorItem = new LinkedHashMap<String, Object>();
                bulkError = jj.next();
                bulkErrorItem.put("index", (bulkError.getIndex() + 1)); // +1 so we get index of item in CFML array
                bulkErrorItem.put("code", bulkError.getCode());
                bulkErrorItem.put("errmsg", bulkError.getMessage());
                bulkErrorItem.put("op", bulkError.getDetails());
                writeErrors.add(bulkErrorItem);
            }
        }

        result.put("acknowledged", bulkResult.isAcknowledged());
        if (bulkResult.isAcknowledged()) {
            result.put("nInserted", bulkResult.getInsertedCount());
            result.put("nMatched", bulkResult.getMatchedCount());
            result.put("nModified", bulkResult.getModifiedCount());
            result.put("nRemoved", bulkResult.getRemovedCount());
            result.put("writeErrors", writeErrors);
        }

        return toCFML(result);
    }

    //mapReduce
    if (methodName.equals("mapReduce")) {
        int len = checkArgLength("mapReduce", args, 4, 4);
        if (len == 4) {
            return toCFML(coll.mapReduce(caster.toString(args[0]), caster.toString(args[1]),
                    caster.toString(args[2]), toDBObject(args[3])));
        }
    }

    // remove
    if (methodName.equals("remove")) {
        checkArgLength("remove", args, 1, 1);
        return toCFML(coll.remove(toDBObject(args[0])));

    }

    // rename
    if (methodName.equals("rename") || methodName.equals("renameCollection")) {
        int len = checkArgLength(methodName.getString(), args, 1, 2);
        if (len == 1) {
            return toCFML(coll.rename(caster.toString(args[0])));
        } else if (len == 2) {
            return toCFML(coll.rename(caster.toString(args[0]), caster.toBooleanValue(args[1])));
        }
    }

    // save
    if (methodName.equals("save")) {
        checkArgLength("save", args, 1, 1);
        return toCFML(coll.save(toDBObject(args[0])));
    }

    // setWriteConcern
    if (methodName.equals("setWriteConcern")) {
        checkArgLength("setWriteConcern", args, 1, 1);
        WriteConcern wc = WriteConcern.valueOf(caster.toString(args[0]));
        if (wc != null) {
            coll.setWriteConcern(wc);
        }
        return null;
    }

    // storageSize
    if (methodName.equals("storageSize")) {
        checkArgLength("storageSize", args, 0, 0);
        return toCFML(coll.getStats().get("storageSize"));
    }

    // totalIndexSize
    if (methodName.equals("totalIndexSize")) {
        checkArgLength("totalIndexSize", args, 0, 0);
        return toCFML(coll.getStats().get("totalIndexSize"));
    }

    // update
    if (methodName.equals("update")) {
        int len = checkArgLength("update", args, 2, 4);
        if (len == 2) {
            return toCFML(coll.update(toDBObject(args[0]), toDBObject(args[1])));
        } else if (len == 3) {
            return toCFML(coll.update(toDBObject(args[0]), toDBObject(args[1]), caster.toBooleanValue(args[2]),
                    false));
        } else if (len == 4) {
            return toCFML(coll.update(toDBObject(args[0]), toDBObject(args[1]), caster.toBooleanValue(args[2]),
                    caster.toBooleanValue(args[3])));
        }
    }

    String functionNames = "aggregate,count,dataSize,distinct,drop,dropIndex,dropIndexes,createIndex,stats,getIndexes,getWriteConcern,find,findOne,findAndRemove,findAndModify,"
            + "group,insert,insertMany,bulkWrite,mapReduce,remove,rename,save,setWriteConcern,storageSize,totalIndexSize,update";

    throw exp.createApplicationException(
            "function " + methodName + " does not exist; existing functions are [" + functionNames + "]");
}