Example usage for com.mongodb DBCollection initializeUnorderedBulkOperation

List of usage examples for com.mongodb DBCollection initializeUnorderedBulkOperation

Introduction

On this page you can find example usage for com.mongodb DBCollection initializeUnorderedBulkOperation.

Prototype

public BulkWriteOperation initializeUnorderedBulkOperation() 

Document

Creates a builder for an unordered bulk operation, consisting of an unordered collection of write requests, which can be any combination of inserts, updates, replaces, or removes.
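
The following is a minimal sketch (not taken from the source files below) showing one unordered batch that mixes all four request types. The database and collection names are hypothetical, and a mongod instance is assumed at localhost:27017.

import com.mongodb.BasicDBObject;
import com.mongodb.BulkWriteOperation;
import com.mongodb.BulkWriteResult;
import com.mongodb.DB;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;

public class UnorderedBulkSketch {
    public static void main(String[] args) {
        MongoClient mongoClient = new MongoClient(); // assumes mongod on localhost:27017
        DB db = mongoClient.getDB("exampleDb"); // hypothetical database name
        DBCollection coll = db.getCollection("exampleColl"); // hypothetical collection name

        // One unordered batch mixing inserts, an upsert-replace, an update, and a remove.
        BulkWriteOperation bulk = coll.initializeUnorderedBulkOperation();
        bulk.insert(new BasicDBObject("_id", 1).append("x", 1));
        bulk.insert(new BasicDBObject("_id", 2).append("x", 2));
        bulk.find(new BasicDBObject("_id", 3)).upsert()
                .replaceOne(new BasicDBObject("_id", 3).append("x", 3));
        bulk.find(new BasicDBObject("_id", 1))
                .updateOne(new BasicDBObject("$set", new BasicDBObject("x", 10)));
        bulk.find(new BasicDBObject("_id", 2)).removeOne();

        // The server may reorder the requests; results are reported for the whole batch.
        BulkWriteResult result = bulk.execute();
        System.out.println("Inserted: " + result.getInsertedCount() + ", matched: "
                + result.getMatchedCount() + ", removed: " + result.getRemovedCount());

        mongoClient.close();
    }
}

Because the batch is unordered, a failure in one request does not stop the others; errors for the whole batch surface together in a BulkWriteException thrown by execute().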

Usage

From source file:com.edduarte.argus.Context.java

License:Apache License
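
This snippet indexes a new snapshot of a page, diffs it against the previously stored snapshot, and writes all detected differences to a per-URL collection in one unordered bulk insert.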

/**
 * Indexes the specified document and detects differences between an older
 * snapshot and the new one. Once differences are collected, saves the resulting
 * index of all occurrences of the new snapshot for future query and comparison
 * jobs.
 */
@Override
public boolean detectDifferences(String url) {

    // create a new document snapshot for the provided url
    DocumentBuilder builder = DocumentBuilder.fromUrl(url).withLanguageDetector(langDetector);

    if (isStoppingEnabled) {
        builder.withStopwords();
    }
    if (isStemmingEnabled) {
        builder.withStemming();
    }
    if (ignoreCase) {
        builder.ignoreCase();
    }
    Document newDocument = builder.build(occurrencesDB, parserPool);
    if (newDocument == null) {
        // A problem occurred during processing, mostly during the fetching phase.
        // This could happen if the page was unavailable at the time.
        return false;
    }

    // check if there is an older document in the collection
    Document oldDocument = collection.get(url);

    if (oldDocument != null) {
        // there was already a document for this url in the collection, so
        // detect differences between them and add them to the differences
        // database
        DifferenceDetector detector = new DifferenceDetector(oldDocument, newDocument, parserPool);
        List<Difference> results = detector.call();

        removeExistingDifferences(url);
        DBCollection diffColl = differencesDB.getCollection(url);

        if (!results.isEmpty()) {
            BulkWriteOperation bulkOp = diffColl.initializeUnorderedBulkOperation();
            results.forEach(bulkOp::insert);
            bulkOp.execute();
        }
    }

    // replace the old document in the collection with the new one
    collection.remove(url);
    collection.add(newDocument);

    return true;
}

From source file:com.impetus.client.mongodb.MongoDBClient.java

License:Apache License
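
This batch executor keeps one BulkWriteOperation per target collection, picking an ordered or unordered builder based on isOrderedBulkOperation(), and queues either an insert or an upsert-replace for each dirty node before flushing the map in onFlushBatch().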

@Override
public int executeBatch() {
    Map<String, BulkWriteOperation> bulkWriteOperationMap = new HashMap<String, BulkWriteOperation>();
    int size = 0;
    for (Node node : nodes) {
        if (node.isDirty()) {
            node.handlePreEvent();
            // deletes cannot be executed in a batch
            if (node.isInState(RemovedState.class)) {
                delete(node.getData(), node.getEntityId());
            } else {
                List<RelationHolder> relationHolders = getRelationHolders(node);
                EntityMetadata metadata = KunderaMetadataManager.getEntityMetadata(kunderaMetadata,
                        node.getDataClass());
                Map<String, DBObject> documents = handler.getDocumentFromEntity(metadata, node.getData(),
                        relationHolders, kunderaMetadata);
                for (String tableName : documents.keySet()) {
                    if (!bulkWriteOperationMap.containsKey(tableName)) {
                        DBCollection collection = mongoDb.getCollection(tableName);
                        BulkWriteOperation builder = null;
                        if (isOrderedBulkOperation()) {
                            builder = collection.initializeOrderedBulkOperation();
                        } else {
                            builder = collection.initializeUnorderedBulkOperation();
                        }
                        bulkWriteOperationMap.put(tableName, builder);
                    }

                    if (!node.isUpdate()) {
                        bulkWriteOperationMap.get(tableName).insert(documents.get(tableName));
                    } else {
                        bulkWriteOperationMap.get(tableName).find(new BasicDBObject("_id", node.getEntityId()))
                                .upsert().replaceOne(documents.get(tableName));
                    }
                    size++;
                }
                indexNode(node, metadata);
            }
            node.handlePostEvent();
        }
    }
    onFlushBatch(bulkWriteOperationMap);
    return size;
}

From source file:com.redhat.lightblue.mongo.crud.BasicDocDeleter.java

License:Open Source License
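
When delete hooks are registered, this deleter first reads the matching documents, then removes them in unordered batches of per-_id remove requests, which makes it possible to tell exactly which documents were not removed. Without hooks it falls back to a single remove by query.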

@Override
public void delete(CRUDOperationContext ctx, DBCollection collection, DBObject mongoQuery,
        CRUDDeleteResponse response) {
    LOGGER.debug("Removing docs with {}", mongoQuery);

    int numDeleted = 0;

    if (!hookOptimization || ctx.getHookManager().hasHooks(ctx, CRUDOperation.DELETE)) {
        LOGGER.debug("There are hooks, retrieve-delete");
        try (DBCursor cursor = collection.find(mongoQuery, null)) {
            // Set read preference to primary for read-for-update operations
            cursor.setReadPreference(ReadPreference.primary());

            // All docs, to be put into the context
            ArrayList<DocCtx> contextDocs = new ArrayList<>();
            // ids to delete from the db
            List<Object> idsToDelete = new ArrayList<>(batchSize);
            while (cursor.hasNext()) {

                // We will use this index to access the documents deleted in this batch
                int thisBatchIndex = contextDocs.size();
                if (idsToDelete.size() < batchSize) {
                    // build batch
                    DBObject doc = cursor.next();
                    DocTranslator.TranslatedDoc tdoc = translator.toJson(doc);
                    DocCtx docCtx = new DocCtx(tdoc.doc, tdoc.rmd);
                    docCtx.setOriginalDocument(docCtx);
                    docCtx.setCRUDOperationPerformed(CRUDOperation.DELETE);
                    contextDocs.add(docCtx);
                    idsToDelete.add(doc.get(MongoCRUDController.ID_STR));
                }

                if (idsToDelete.size() == batchSize || !cursor.hasNext()) {
                    // batch built, or no documents left
                    BulkWriteOperation bw = collection.initializeUnorderedBulkOperation();

                    for (Object id : idsToDelete) {
                        // doing a bulk of single operations instead of removing by initial query
                        // that way we know which documents were not removed
                        bw.find(new BasicDBObject("_id", id)).remove();
                    }

                    BulkWriteResult result = null;
                    try {
                        if (writeConcern == null) {
                            LOGGER.debug("Bulk deleting docs");
                            result = bw.execute();
                        } else {
                            LOGGER.debug("Bulk deleting docs with writeConcern={} from execution",
                                    writeConcern);
                            result = bw.execute(writeConcern);
                        }
                        LOGGER.debug("Bulk deleted docs - attempted {}, deleted {}", idsToDelete.size(),
                                result.getRemovedCount());
                    } catch (BulkWriteException bwe) {
                        LOGGER.error("Bulk write exception", bwe);
                        handleBulkWriteError(bwe.getWriteErrors(),
                                contextDocs.subList(thisBatchIndex, contextDocs.size()));
                        result = bwe.getWriteResult();
                    } catch (RuntimeException e) {
                        LOGGER.error("Exception", e);
                        throw e;
                    } finally {
                        // result is null if a non-bulk RuntimeException was thrown above
                        if (result != null) {
                            numDeleted += result.getRemovedCount();
                        }
                        // clear list before processing next batch
                        idsToDelete.clear();
                    }
                }
            }
            ctx.setDocumentStream(new ListDocumentStream<DocCtx>(contextDocs));
        }
    } else {
        LOGGER.debug("There are no hooks, deleting in bulk");
        try {
            if (writeConcern == null) {
                numDeleted = collection.remove(mongoQuery).getN();
            } else {
                numDeleted = collection.remove(mongoQuery, writeConcern).getN();
            }
        } catch (MongoException e) {
            LOGGER.error("Deletion error", e);
            throw e;
        }
        ctx.setDocumentStream(new ListDocumentStream<DocCtx>(new ArrayList<DocCtx>()));
    }

    response.setNumDeleted(numDeleted);
}

From source file:com.redhat.lightblue.mongo.crud.BasicDocSaver.java

License:Open Source License
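
After per-field access checks, this saver queues every permitted document into one unordered bulk insert and maps any BulkWriteException errors back onto the offending documents.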

private void insertDocs(CRUDOperationContext ctx, DBCollection collection, List<DocInfo> list) {
    if (!list.isEmpty()) {
        LOGGER.debug("Inserting {} docs", list.size());
        if (!md.getAccess().getInsert().hasAccess(ctx.getCallerRoles())) {
            for (DocInfo doc : list) {
                doc.inputDoc.addError(
                        Error.get("insert", MongoCrudConstants.ERR_NO_ACCESS, "insert:" + md.getName()));
            }
        } else {
            List<DocInfo> insertionAttemptList = new ArrayList<>(list.size());
            for (DocInfo doc : list) {
                Set<Path> paths = roleEval.getInaccessibleFields_Insert(doc.inputDoc);
                LOGGER.debug("Inaccessible fields:{}", paths);
                if (paths == null || paths.isEmpty()) {
                    DocTranslator.populateDocHiddenFields(doc.newDoc, md);
                    DocVerUtil.overwriteDocVer(doc.newDoc, docver);
                    insertionAttemptList.add(doc);
                } else {
                    for (Path path : paths) {
                        doc.inputDoc.addError(
                                Error.get("insert", CrudConstants.ERR_NO_FIELD_INSERT_ACCESS, path.toString()));
                    }
                }
            }
            LOGGER.debug("After access checks, inserting {} docs", insertionAttemptList.size());
            if (!insertionAttemptList.isEmpty()) {
                BulkWriteOperation bw = collection.initializeUnorderedBulkOperation();
                for (DocInfo doc : insertionAttemptList) {
                    ctx.getFactory().getInterceptors().callInterceptors(InterceptPoint.PRE_CRUD_INSERT_DOC, ctx,
                            doc.inputDoc);
                    bw.insert(doc.newDoc);
                    doc.inputDoc.setCRUDOperationPerformed(CRUDOperation.INSERT);
                }
                try {
                    if (writeConcern == null) {
                        LOGGER.debug("Bulk inserting docs");
                        bw.execute();
                    } else {
                        LOGGER.debug("Bulk inserting docs with writeConcern={} from execution", writeConcern);
                        bw.execute(writeConcern);
                    }
                } catch (BulkWriteException bwe) {
                    LOGGER.error("Bulk write exception", bwe);
                    handleBulkWriteError(bwe.getWriteErrors(), "insert", insertionAttemptList);
                } catch (RuntimeException e) {
                    LOGGER.error("Exception", e);
                    throw e;
                }
            }
        }
    }
}

From source file:com.redhat.lightblue.mongo.crud.UpdateIfSameProtocol.java

License:Open Source License
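
This constructor prepares the unordered bulk builder up front; the rest of the protocol (not shown here) queues conditional updates against it before executing.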

public UpdateIfSameProtocol(DBCollection collection, WriteConcern writeConcern) {
    this.collection = collection;
    this.writeConcern = writeConcern;
    bwo = collection.initializeUnorderedBulkOperation();
}

From source file:com.staticvillage.recommender.indexer.MongoDBIndexer.java

License:Apache License
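
This indexer converts each Place bean to a DBObject and inserts the whole list with a single unordered bulk write.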

@Override
public void addBeans(List<Place> beans) throws IndexerException {
    DBCollection dbCollection = instanceDB.getCollection(collection);
    BulkWriteOperation builder = dbCollection.initializeUnorderedBulkOperation();

    for (Place place : beans) {
        builder.insert(toDBObject(place));
    }

    builder.execute();
}

From source file:com.xoriant.akka.mongodb.bulkimport.actor.MongoInsertionActor.java

License:Apache License
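
This Akka actor bulk-inserts each incoming batch of CSV records, validating every record before queuing it, and reports the inserted count back to the sender.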

@Override
public void onReceive(Object message) throws Exception {
    if (message instanceof CSVRecordBatchMsg) {
        CSVRecordBatchMsg csvRecordBatch = (CSVRecordBatchMsg) message;
        System.out.println("InsertionActor : Batch no " + csvRecordBatch.getBatchNo() + " received ack");
        DB db = mongoClient.getDB("akka-bulkimport");
        DBCollection personColl = db.getCollection("persons");
        BulkWriteOperation builder = personColl.initializeUnorderedBulkOperation();
        List<BasicDBObject> persons = csvRecordBatch.getRecords();
        for (BasicDBObject personDBObject : persons) {
            if (validate(personDBObject)) {
                builder.insert(personDBObject);
            }
        }
        BulkWriteResult result = builder.execute();
        BatchCompleteMsg batchComplete = new BatchCompleteMsg(csvRecordBatch.getBatchNo(),
                result.getInsertedCount());
        getSender().tell(batchComplete, getSelf());
    } else if (message instanceof EndOfFileMsg) {
        System.out.println("InsertionActor: EOF received");
    } else {
        unhandled(message);
    }
}

From source file:edu.csulaerp.db.ReferenceMongo.java

License:Apache License
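
This quick tour of the legacy driver covers basic CRUD and queries, then finishes with an ordered and an unordered bulk operation on the same collection.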

/**
 * Run this main method to see the output of this quick example.
 *
 * @param args takes no args
 * @throws UnknownHostException if it cannot connect to a MongoDB instance at localhost:27017
 */
public static void main(final String[] args) throws UnknownHostException {
    // connect to the local database server
    MongoClient mongoClient = new MongoClient();

    /*
    // Authenticate - optional
    MongoCredential credential = MongoCredential.createMongoCRCredential(userName, database, password);
    MongoClient mongoClient = new MongoClient(new ServerAddress(), Arrays.asList(credential));
    */

    // get handle to "mydb"
    DB db = mongoClient.getDB("mydb");

    // get a list of the collections in this database and print them out
    Set<String> collectionNames = db.getCollectionNames();
    for (final String s : collectionNames) {
        System.out.println(s);
    }

    // get a collection object to work with
    DBCollection coll = db.getCollection("testCollection");

    // drop all the data in it
    coll.drop();

    // make a document and insert it
    BasicDBObject doc = new BasicDBObject("name", "MongoDB").append("type", "database").append("count", 1)
            .append("info", new BasicDBObject("x", 203).append("y", 102));

    coll.insert(doc);

    // get it (it's the only document in there, since we dropped the rest earlier)
    DBObject myDoc = coll.findOne();
    System.out.println(myDoc);

    // now, let's add lots of little documents to the collection so we can explore queries and cursors
    for (int i = 0; i < 100; i++) {
        coll.insert(new BasicDBObject().append("i", i));
    }
    System.out
            .println("total # of documents after inserting 100 small ones (should be 101) " + coll.getCount());

    // let's get all the documents in the collection and print them out
    DBCursor cursor = coll.find();
    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close();
    }

    // now use a query to get 1 document out
    BasicDBObject query = new BasicDBObject("i", 71);
    cursor = coll.find(query);

    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close();
    }

    // $ Operators are represented as strings
    query = new BasicDBObject("j", new BasicDBObject("$ne", 3)).append("k", new BasicDBObject("$gt", 10));

    cursor = coll.find(query);

    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close();
    }

    // now use a range query to get a larger subset
    // find all where i > 50
    query = new BasicDBObject("i", new BasicDBObject("$gt", 50));
    cursor = coll.find(query);

    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close();
    }

    // range query with multiple constraints
    query = new BasicDBObject("i", new BasicDBObject("$gt", 20).append("$lte", 30));
    cursor = coll.find(query);

    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close();
    }

    // Count all documents in a collection, taking at most one second to do so
    coll.find().maxTime(1, SECONDS).count();

    // Bulk operations
    BulkWriteOperation builder = coll.initializeOrderedBulkOperation();
    builder.insert(new BasicDBObject("_id", 1));
    builder.insert(new BasicDBObject("_id", 2));
    builder.insert(new BasicDBObject("_id", 3));

    builder.find(new BasicDBObject("_id", 1)).updateOne(new BasicDBObject("$set", new BasicDBObject("x", 2)));
    builder.find(new BasicDBObject("_id", 2)).removeOne();
    builder.find(new BasicDBObject("_id", 3)).replaceOne(new BasicDBObject("_id", 3).append("x", 4));

    BulkWriteResult result = builder.execute();
    System.out.println("Ordered bulk write result : " + result);

    // Unordered bulk operation - no guarantee of order of operation
    builder = coll.initializeUnorderedBulkOperation();
    builder.find(new BasicDBObject("_id", 1)).removeOne();
    builder.find(new BasicDBObject("_id", 2)).removeOne();

    result = builder.execute();
    System.out.println("Ordered bulk write result : " + result);

    // parallelScan
    ParallelScanOptions parallelScanOptions = ParallelScanOptions.builder().numCursors(3).batchSize(300)
            .build();

    List<Cursor> cursors = coll.parallelScan(parallelScanOptions);
    for (Cursor pCursor : cursors) {
        while (pCursor.hasNext()) {
            System.out.println(pCursor.next());
        }
    }

    // release resources
    db.dropDatabase();
    mongoClient.close();
}

From source file:edu.umass.cs.gnsserver.database.MongoRecords.java

License:Apache License
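
This method turns a map of primary keys to JSON values into one unordered bulk write: non-null values become upsert-replace requests, null values become removes.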

/**
 *
 * @param collectionName
 * @param values
 * @throws FailedDBOperationException
 * @throws RecordExistsException
 */
public void bulkUpdate(String collectionName, Map<String, JSONObject> values)
        throws FailedDBOperationException, RecordExistsException {
    DBCollection collection = db.getCollection(collectionName);
    String primaryKey = mongoCollectionSpecs.getCollectionSpec(collectionName).getPrimaryKey().getName();
    db.requestEnsureConnection();
    BulkWriteOperation unordered = collection.initializeUnorderedBulkOperation();
    for (Map.Entry<String, JSONObject> entry : values.entrySet()) {
        BasicDBObject query = new BasicDBObject(primaryKey, entry.getKey());
        JSONObject value = entry.getValue();
        if (value != null) {
            DBObject document;
            try {
                document = (DBObject) JSON.parse(value.toString());
            } catch (Exception e) {
                throw new FailedDBOperationException(collectionName, "bulkUpdate",
                        "Unable to parse json: " + e.getMessage());
            }
            unordered.find(query).upsert().replaceOne(document);
        } else {
            unordered.find(query).removeOne();
        }
    }
    // Maybe check the result?
    unordered.execute();
}

From source file:mongodb.performance.MongoDBPerformance.java

/**
 * @param args the command line arguments
 */
public static void main(String[] args) throws UnknownHostException, FileNotFoundException, IOException {
    if (args.length == 0) {
        System.out.println("Parmetro no informado!");
        System.exit(-1);
    }
    System.out.println("Parmetro: " + args[0]);

    MongoClient mongoClient = new MongoClient();
    //MongoClient mongoClient = new MongoClient( "54.172.218.64" , 27017 );
    DB db = mongoClient.getDB("myDatabase");

    DBCollection collection = db.getCollection("ads");
    collection.drop();

    BulkWriteOperation builder = collection.initializeUnorderedBulkOperation();

    FileInputStream fileInputStream = new FileInputStream(".\\resources\\MongoDB" + args[0] + ".txt");
    BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(fileInputStream));
    // Insert
    // Time start    
    long start = System.currentTimeMillis();

    String line;
    while ((line = bufferedReader.readLine()) != null) {
        DBObject bson = (DBObject) JSON.parse(line);
        builder.insert(bson);
    }
    bufferedReader.close();
    builder.execute();
    //Time end
    long elapsed = System.currentTimeMillis() - start;
    System.out.println("[insert] Time elapsed: " + elapsed + " ms");

    // Update
    // Time start    
    start = System.currentTimeMillis();
    collection.updateMulti(new BasicDBObject(),
            new BasicDBObject("$set", new BasicDBObject().append("ano", 2006)));
    // Time end
    elapsed = System.currentTimeMillis() - start;
    System.out.println("[update] Time elapsed: " + elapsed + " ms");

    // Select
    // Time start    
    start = System.currentTimeMillis();
    BasicDBObject keys = new BasicDBObject();
    keys.put("_id", 1);
    keys.put("modeloCarro.marca", 1);
    keys.put("modeloCarro.nome", 1);
    keys.put("uf", 1);
    keys.put("placa_carro", 1);
    keys.put("qtd_portas", 1);
    keys.put("cambio", 1);
    keys.put("combustivel", 1);
    keys.put("cor", 1);
    keys.put("km", 1);
    keys.put("valor", 1);
    keys.put("detalhe", 1);
    BasicDBObject sort = new BasicDBObject("_id", 1);

    DBCursor cursor = collection.find(new BasicDBObject(), keys).sort(sort);
    while (cursor.hasNext()) {
        cursor.next();
    }
    // Time end
    elapsed = System.currentTimeMillis() - start;
    System.out.println("[select] Time elapsed: " + elapsed + " ms");

    // Delete
    // Time start    
    start = System.currentTimeMillis();
    collection.remove(new BasicDBObject());
    // Time end
    elapsed = System.currentTimeMillis() - start;
    System.out.println("[delete] Time elapsed: " + elapsed + " ms");
}