Example usage for com.mongodb BulkWriteOperation insert

List of usage examples for com.mongodb BulkWriteOperation insert

Introduction

On this page you can find usage examples for com.mongodb BulkWriteOperation insert.

Prototype

public void insert(final DBObject document) 

Document

Add an insert request to the bulk operation
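
The pattern shared by the examples below is: obtain a BulkWriteOperation from a DBCollection, queue any number of insert requests with insert(), then send them all in a single round trip with execute(). A minimal sketch of that pattern using the same legacy driver API (the collection name "items" and the documents variable are hypothetical):

DBCollection coll = db.getCollection("items"); // hypothetical collection name
BulkWriteOperation bulk = coll.initializeUnorderedBulkOperation();
for (DBObject doc : documents) {
    bulk.insert(doc); // queues an insert request; nothing is sent yet
}
BulkWriteResult result = bulk.execute(); // performs all queued inserts in one batch
System.out.println("Inserted " + result.getInsertedCount() + " documents");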

Usage

From source file:com.camel.realtimelog.PersistenceMongoAccessor.java

License:Open Source License

@Override
public void saveAll(List<DBObject> logObjects) throws Exception {
    BulkWriteOperation operation = logs.initializeUnorderedBulkOperation();
    for (DBObject dbObj : logObjects) {
        operation.insert(dbObj);
    }
    operation.execute();
    //logs.insert(logObjects);
    logger.debug("save " + logObjects.size() + " errror logs success");
}

From source file:com.edduarte.argus.document.Document.java

License:Apache License

public void addOccurrences(Iterator<Occurrence> occurrencesIt) {
    if (!occurrencesIt.hasNext()) {
        return;
    }
    BulkWriteOperation builder = occCollection.initializeUnorderedBulkOperation();
    while (occurrencesIt.hasNext()) {
        builder.insert(occurrencesIt.next());
    }
    builder.execute();
    builder = null;
}

From source file:com.redhat.lightblue.mongo.crud.BasicDocSaver.java

License:Open Source License

private void insertDocs(CRUDOperationContext ctx, DBCollection collection, List<DocInfo> list) {
    if (!list.isEmpty()) {
        LOGGER.debug("Inserting {} docs", list.size());
        if (!md.getAccess().getInsert().hasAccess(ctx.getCallerRoles())) {
            for (DocInfo doc : list) {
                doc.inputDoc.addError(
                        Error.get("insert", MongoCrudConstants.ERR_NO_ACCESS, "insert:" + md.getName()));
            }
        } else {
            List<DocInfo> insertionAttemptList = new ArrayList<>(list.size());
            for (DocInfo doc : list) {
                Set<Path> paths = roleEval.getInaccessibleFields_Insert(doc.inputDoc);
                LOGGER.debug("Inaccessible fields:{}", paths);
                if (paths == null || paths.isEmpty()) {
                    DocTranslator.populateDocHiddenFields(doc.newDoc, md);
                    DocVerUtil.overwriteDocVer(doc.newDoc, docver);
                    insertionAttemptList.add(doc);
                } else {
                    for (Path path : paths) {
                        doc.inputDoc.addError(
                                Error.get("insert", CrudConstants.ERR_NO_FIELD_INSERT_ACCESS, path.toString()));
                    }
                }
            }
            LOGGER.debug("After access checks, inserting {} docs", insertionAttemptList.size());
            if (!insertionAttemptList.isEmpty()) {
                BulkWriteOperation bw = collection.initializeUnorderedBulkOperation();
                for (DocInfo doc : insertionAttemptList) {
                    ctx.getFactory().getInterceptors().callInterceptors(InterceptPoint.PRE_CRUD_INSERT_DOC, ctx,
                            doc.inputDoc);
                    bw.insert(doc.newDoc);
                    doc.inputDoc.setCRUDOperationPerformed(CRUDOperation.INSERT);
                }
                try {
                    if (writeConcern == null) {
                        LOGGER.debug("Bulk inserting docs");
                        bw.execute();
                    } else {
                        LOGGER.debug("Bulk inserting docs with writeConcern={} from execution", writeConcern);
                        bw.execute(writeConcern);
                    }
                } catch (BulkWriteException bwe) {
                    LOGGER.error("Bulk write exception", bwe);
                    handleBulkWriteError(bwe.getWriteErrors(), "insert", insertionAttemptList);
                } catch (RuntimeException e) {
                    LOGGER.error("Exception", e);
                    throw e;
                } finally {
                }
            }
        }
    }
}

From source file:com.staticvillage.recommender.indexer.MongoDBIndexer.java

License:Apache License

@Override
public void addBeans(List<Place> beans) throws IndexerException {
    DBCollection dbCollection = instanceDB.getCollection(collection);
    BulkWriteOperation builder = dbCollection.initializeUnorderedBulkOperation();

    for (Place place : beans) {
        builder.insert(toDBObject(place));
    }

    builder.execute();
}

From source file:com.stratio.decision.functions.SaveToMongoActionExecutionFunction.java

License:Apache License

@Override
public void process(Iterable<StratioStreamingMessage> messages) throws Exception {

    Integer partitionSize = maxBatchSize;

    if (partitionSize == null || partitionSize <= 0) {
        partitionSize = Iterables.size(messages);
    }

    Iterable<List<StratioStreamingMessage>> partitionIterables = Iterables.partition(messages, partitionSize);

    try {

        for (List<StratioStreamingMessage> messageList : partitionIterables) {

            Map<String, BulkWriteOperation> elementsToInsert = new HashMap<String, BulkWriteOperation>();

            for (StratioStreamingMessage event : messageList) {
                BasicDBObject object = new BasicDBObject(TIMESTAMP_FIELD, event.getTimestamp());
                for (ColumnNameTypeValue columnNameTypeValue : event.getColumns()) {
                    object.append(columnNameTypeValue.getColumn(), columnNameTypeValue.getValue());
                }

                BulkWriteOperation bulkInsertOperation = elementsToInsert.get(event.getStreamName());

                if (bulkInsertOperation == null) {
                    bulkInsertOperation = getDB().getCollection(event.getStreamName())
                            .initializeUnorderedBulkOperation();

                    elementsToInsert.put(event.getStreamName(), bulkInsertOperation);
                    getDB().getCollection(event.getStreamName())
                            .createIndex(new BasicDBObject(TIMESTAMP_FIELD, -1));
                }

                bulkInsertOperation.insert(object);
            }

            for (Entry<String, BulkWriteOperation> stratioStreamingMessage : elementsToInsert.entrySet()) {
                stratioStreamingMessage.getValue().execute();
            }
        }

    } catch (Exception e) {
        log.error("Error saving in Mongo: " + e.getMessage());
    }
}

From source file:com.stratio.streaming.functions.SaveToMongoActionExecutionFunction.java

License:Apache License

@Override
public void process(Iterable<StratioStreamingMessage> messages) throws Exception {
    Map<String, BulkWriteOperation> elementsToInsert = new HashMap<String, BulkWriteOperation>();

    for (StratioStreamingMessage event : messages) {
        BasicDBObject object = new BasicDBObject(TIMESTAMP_FIELD, event.getTimestamp());
        for (ColumnNameTypeValue columnNameTypeValue : event.getColumns()) {
            object.append(columnNameTypeValue.getColumn(), columnNameTypeValue.getValue());
        }

        BulkWriteOperation bulkInsertOperation = elementsToInsert.get(event.getStreamName());

        if (bulkInsertOperation == null) {
            bulkInsertOperation = getDB().getCollection(event.getStreamName())
                    .initializeUnorderedBulkOperation();

            elementsToInsert.put(event.getStreamName(), bulkInsertOperation);
            getDB().getCollection(event.getStreamName()).createIndex(new BasicDBObject(TIMESTAMP_FIELD, -1));
        }

        bulkInsertOperation.insert(object);
    }

    for (Entry<String, BulkWriteOperation> stratioStreamingMessage : elementsToInsert.entrySet()) {
        stratioStreamingMessage.getValue().execute();
    }
}

From source file:com.xoriant.akka.mongodb.bulkimport.actor.MongoInsertionActor.java

License:Apache License

@Override
public void onReceive(Object message) throws Exception {
    if (message instanceof CSVRecordBatchMsg) {
        CSVRecordBatchMsg csvRecordBatch = (CSVRecordBatchMsg) message;
        System.out.println("InsertionActor : Batch no " + csvRecordBatch.getBatchNo() + " received ack");
        DB db = mongoClient.getDB("akka-bulkimport");
        DBCollection personColl = db.getCollection("persons");
        BulkWriteOperation builder = personColl.initializeUnorderedBulkOperation();
        List<BasicDBObject> persons = csvRecordBatch.getRecords();
        for (BasicDBObject personDBObject : persons) {
            if (validate(personDBObject)) {
                builder.insert(personDBObject);
            }
        }
        BulkWriteResult result = builder.execute();
        BatchCompleteMsg batchComplete = new BatchCompleteMsg(csvRecordBatch.getBatchNo(),
                result.getInsertedCount());
        getSender().tell(batchComplete, getSelf());
    } else if (message instanceof EndOfFileMsg) {
        System.out.println("InsertionActor: EOF received");
    } else {
        unhandled(message);
    }
}

From source file:com.zjy.mongo.output.MongoOutputCommitter.java

License:Apache License

@Override
public void commitTask(final TaskAttemptContext taskContext) throws IOException {
    LOG.info("Committing task.");

    collections = MongoConfigUtil.getOutputCollections(taskContext.getConfiguration());
    numberOfHosts = collections.size();

    // Get temporary file.
    Path tempFilePath = getTaskAttemptPath(taskContext);
    LOG.info("Committing from temporary file: " + tempFilePath.toString());
    long filePos = 0, fileLen;
    FSDataInputStream inputStream = null;
    try {
        FileSystem fs = FileSystem.get(taskContext.getConfiguration());
        inputStream = fs.open(tempFilePath);
        fileLen = fs.getFileStatus(tempFilePath).getLen();
    } catch (IOException e) {
        LOG.error("Could not open temporary file for committing", e);
        cleanupAfterCommit(inputStream, taskContext);
        throw e;
    }

    int maxDocs = MongoConfigUtil.getBatchSize(taskContext.getConfiguration());
    int curBatchSize = 0;
    DBCollection coll = getDbCollectionByRoundRobin();
    BulkWriteOperation bulkOp = coll.initializeOrderedBulkOperation();

    // Read Writables out of the temporary file.
    BSONWritable bw = new BSONWritable();
    MongoUpdateWritable muw = new MongoUpdateWritable();
    while (filePos < fileLen) {
        try {
            // Determine writable type, and perform corresponding operation
            // on MongoDB.
            int mwType = inputStream.readInt();
            if (MongoWritableTypes.BSON_WRITABLE == mwType) {
                bw.readFields(inputStream);
                bulkOp.insert(new BasicDBObject(bw.getDoc().toMap()));
            } else if (MongoWritableTypes.MONGO_UPDATE_WRITABLE == mwType) {
                muw.readFields(inputStream);
                DBObject query = new BasicDBObject(muw.getQuery().toMap());
                DBObject modifiers = new BasicDBObject(muw.getModifiers().toMap());
                if (muw.isMultiUpdate()) {
                    if (muw.isUpsert()) {
                        bulkOp.find(query).upsert().update(modifiers);
                    } else {
                        bulkOp.find(query).update(modifiers);
                    }
                } else {
                    if (muw.isUpsert()) {
                        bulkOp.find(query).upsert().updateOne(modifiers);
                    } else {
                        bulkOp.find(query).updateOne(modifiers);
                    }
                }
            } else {
                throw new IOException("Unrecognized type: " + mwType);
            }
            filePos = inputStream.getPos();
            // Write to MongoDB if the batch is full, or if this is the last
            // operation to be performed for the Task.
            if (++curBatchSize >= maxDocs || filePos >= fileLen) {
                try {
                    bulkOp.execute();
                } catch (MongoException e) {
                    LOG.error("Could not write to MongoDB", e);
                    throw e;
                }
                coll = getDbCollectionByRoundRobin();
                bulkOp = coll.initializeOrderedBulkOperation();
                curBatchSize = 0;

                // Signal progress back to Hadoop framework so that we
                // don't time out.
                taskContext.progress();
            }
        } catch (IOException e) {
            LOG.error("Error reading from temporary file", e);
            throw e;
        }
    }

    cleanupAfterCommit(inputStream, taskContext);
}

From source file:edu.csulaerp.db.ReferenceMongo.java

License:Apache License

/**
 * Run this main method to see the output of this quick example.
 *
 * @param args takes no args
 * @throws UnknownHostException if it cannot connect to a MongoDB instance at localhost:27017
 */
public static void main(final String[] args) throws UnknownHostException {
    // connect to the local database server
    MongoClient mongoClient = new MongoClient();

    /*
    // Authenticate - optional
    MongoCredential credential = MongoCredential.createMongoCRCredential(userName, database, password);
    MongoClient mongoClient = new MongoClient(new ServerAddress(), Arrays.asList(credential));
    */

    // get handle to "mydb"
    DB db = mongoClient.getDB("mydb");

    // get a list of the collections in this database and print them out
    Set<String> collectionNames = db.getCollectionNames();
    for (final String s : collectionNames) {
        System.out.println(s);
    }

    // get a collection object to work with
    DBCollection coll = db.getCollection("testCollection");

    // drop all the data in it
    coll.drop();

    // make a document and insert it
    BasicDBObject doc = new BasicDBObject("name", "MongoDB").append("type", "database").append("count", 1)
            .append("info", new BasicDBObject("x", 203).append("y", 102));

    coll.insert(doc);

    // get it back (it's the only document left in the collection since we dropped the rest earlier)
    DBObject myDoc = coll.findOne();
    System.out.println(myDoc);

    // now, let's add lots of little documents to the collection so we can explore queries and cursors
    for (int i = 0; i < 100; i++) {
        coll.insert(new BasicDBObject().append("i", i));
    }
    System.out
            .println("total # of documents after inserting 100 small ones (should be 101) " + coll.getCount());

    // let's get all the documents in the collection and print them out
    DBCursor cursor = coll.find();
    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close();
    }

    // now use a query to get 1 document out
    BasicDBObject query = new BasicDBObject("i", 71);
    cursor = coll.find(query);

    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close();
    }

    // $ Operators are represented as strings
    query = new BasicDBObject("j", new BasicDBObject("$ne", 3)).append("k", new BasicDBObject("$gt", 10));

    cursor = coll.find(query);

    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close();
    }

    // now use a range query to get a larger subset
    // find all where i > 50
    query = new BasicDBObject("i", new BasicDBObject("$gt", 50));
    cursor = coll.find(query);

    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close();
    }

    // range query with multiple constraints
    query = new BasicDBObject("i", new BasicDBObject("$gt", 20).append("$lte", 30));
    cursor = coll.find(query);

    try {
        while (cursor.hasNext()) {
            System.out.println(cursor.next());
        }
    } finally {
        cursor.close();
    }

    // Count all documents in a collection but take a maximum second to do so
    coll.find().maxTime(1, SECONDS).count();

    // Bulk operations
    BulkWriteOperation builder = coll.initializeOrderedBulkOperation();
    builder.insert(new BasicDBObject("_id", 1));
    builder.insert(new BasicDBObject("_id", 2));
    builder.insert(new BasicDBObject("_id", 3));

    builder.find(new BasicDBObject("_id", 1)).updateOne(new BasicDBObject("$set", new BasicDBObject("x", 2)));
    builder.find(new BasicDBObject("_id", 2)).removeOne();
    builder.find(new BasicDBObject("_id", 3)).replaceOne(new BasicDBObject("_id", 3).append("x", 4));

    BulkWriteResult result = builder.execute();
    System.out.println("Ordered bulk write result : " + result);

    // Unordered bulk operation - no guarantee of order of operation
    builder = coll.initializeUnorderedBulkOperation();
    builder.find(new BasicDBObject("_id", 1)).removeOne();
    builder.find(new BasicDBObject("_id", 2)).removeOne();

    result = builder.execute();
    System.out.println("Ordered bulk write result : " + result);

    // parallelScan
    ParallelScanOptions parallelScanOptions = ParallelScanOptions.builder().numCursors(3).batchSize(300)
            .build();

    List<Cursor> cursors = coll.parallelScan(parallelScanOptions);
    for (Cursor pCursor : cursors) {
        while (pCursor.hasNext()) {
            System.out.println(pCursor.next());
        }
    }

    // release resources
    db.dropDatabase();
    mongoClient.close();
}

From source file:fr.gouv.vitam.mdbes.MainIngestMDBESFromFile.java

License:Open Source License

private static final void runOnce(final MongoDbAccess dbvitam)
        throws InterruptedException, InstantiationException, IllegalAccessException, IOException {
    System.out.println("Load starting... ");
    int nbThread = ingest.length;

    final long date11 = System.currentTimeMillis();
    if (ingest.length == 1) {
        final FileInputStream fstream = new FileInputStream(ingest[0]);
        final DataInputStream in = new DataInputStream(fstream);
        final BufferedReader br = new BufferedReader(new InputStreamReader(in));
        String strLine;
        int nb = 0;
        final HashMap<String, String> esIndex = new HashMap<>();
        BulkWriteOperation bulk = dbvitam.daips.collection.initializeUnorderedBulkOperation();
        while ((strLine = br.readLine()) != null) {
            final DBObject bson = (DBObject) JSON.parse(strLine);
            bulk.insert(bson);
            ElasticSearchAccess.addEsIndex(dbvitam, model, esIndex, bson);
            nb++;
            if (nb % GlobalDatas.LIMIT_MDB_NEW_INDEX == 0) {
                BulkWriteResult result = bulk.execute();
                int check = result.getInsertedCount();
                if (check != nb) {
                    System.out.print("x");
                } else {
                    System.out.print(".");
                }
                bulk = dbvitam.daips.collection.initializeUnorderedBulkOperation();
                MainIngestFile.cptMaip.addAndGet(check);
                nb = 0;
            }
        }
        if (!esIndex.isEmpty()) {
            System.out.println("Last bulk ES");
            dbvitam.addEsEntryIndex(true, esIndex, model);
            esIndex.clear();
        }
        if (nb != 0) {
            bulk.execute();
            MainIngestFile.cptMaip.addAndGet(nb);
            nb = 0;
        }
    } else {
        // threads
        ExecutorService executorService = Executors.newFixedThreadPool(ingest.length + 1);
        for (int i = 0; i < ingest.length; i++) {
            MainIngestMDBESFromFile ingestrun = new MainIngestMDBESFromFile();
            ingestrun.file = ingest[i];
            executorService.execute(ingestrun);
        }
        // ES
        MainIngestMDBESFromFile ingestrun = new MainIngestMDBESFromFile();
        ingestrun.file = null;
        ingestrun.files = ingest;
        ingestrun.original = dbvitam;
        executorService.execute(ingestrun);

        executorService.shutdown();
        while (!executorService.awaitTermination(10000, TimeUnit.MILLISECONDS)) {
            ;
        }
        System.out.println("Load ended");
        final long nbBigM = dbvitam.getDaipSize();
        final long nbBigD = dbvitam.getPaipSize();
        System.out.println("\n Big Test (" + nbThread + " nb MAIP: " + MainIngestFile.cptMaip.get()
                + ") with MAIP: " + nbBigM + " DATA: " + nbBigD + " => Load:"
                + (loadt.get()) / ((float) MainIngestFile.cptMaip.get() * nbThread));

        System.out.println("\nThread;nbLoad;nbTotal;Load");
        System.out.println(nbThread + ";" + MainIngestFile.cptMaip.get() + ";" + nbBigM + ";"
                + (loadt.get()) / ((float) MainIngestFile.cptMaip.get() * nbThread));
    }
    final long date12 = System.currentTimeMillis();
    MainIngestMDBESFromFile.loadt.set(date12 - date11);

    System.out.println("Load ended");
    /*
     * System.out.println("All elements\n================================================================");
     * DbVitam.printStructure(dbvitam);
     */
    final long nbBigM = dbvitam.getDaipSize();
    final long nbBigD = dbvitam.getPaipSize();
    System.out.println("\n Big Test (" + nbThread + " Threads chacune " + MainIngestFile.nb + " nb MAIP: "
            + MainIngestFile.cptMaip.get() + ") with MAIP: " + nbBigM + " DATA: " + nbBigD + " => Load:"
            + (MainIngestMDBESFromFile.loadt.get()) / ((float) MainIngestFile.cptMaip.get()));

    System.out.println("\nThread;nbLoad;nbTotal;Load");
    System.out.println(nbThread + ";" + MainIngestFile.cptMaip.get() + ";" + nbBigM + ";"
            + (MainIngestMDBESFromFile.loadt.get()) / ((float) MainIngestFile.cptMaip.get()));
}