Example usage for com.mongodb.client.model ReplaceOneModel ReplaceOneModel

List of usage examples for com.mongodb.client.model ReplaceOneModel ReplaceOneModel

Introduction

In this page you can find the example usage for com.mongodb.client.model ReplaceOneModel ReplaceOneModel.

Prototype

public ReplaceOneModel(final Bson filter, final T replacement, final ReplaceOptions options) 

Source Link

Document

Construct a new instance.

Usage

From source file:at.grahsl.kafka.connect.mongodb.cdc.debezium.mongodb.MongoDbInsert.java

License:Apache License

@Override
public WriteModel<BsonDocument> perform(SinkDocument doc) {

    // The value doc is mandatory for inserts; it carries the new document as JSON.
    BsonDocument valueDoc = doc.getValueDoc()
            .orElseThrow(() -> new DataException("error: value doc must not be missing for insert operation"));

    try {
        String json = valueDoc.get(JSON_DOC_FIELD_PATH).asString().getValue();
        BsonDocument insertDoc = BsonDocument.parse(json);
        // Filter on the parsed document's own id so the replace targets that exact document.
        BsonDocument filterDoc =
                new BsonDocument(DBCollection.ID_FIELD_NAME, insertDoc.get(DBCollection.ID_FIELD_NAME));
        return new ReplaceOneModel<>(filterDoc, insertDoc, UPDATE_OPTIONS);
    } catch (Exception exc) {
        // Parsing or field access failed; surface as a connect DataException with cause.
        throw new DataException(exc);
    }

}

From source file:at.grahsl.kafka.connect.mongodb.cdc.debezium.mongodb.MongoDbUpdate.java

License:Apache License

@Override
public WriteModel<BsonDocument> perform(SinkDocument doc) {

    // The value doc is mandatory for updates; it carries either a full replacement
    // document or an idempotent patch under JSON_DOC_FIELD_PATH.
    BsonDocument valueDoc = doc.getValueDoc()
            .orElseThrow(() -> new DataException("error: value doc must not be missing for update operation"));

    try {

        BsonDocument updateDoc = BsonDocument.parse(valueDoc.getString(JSON_DOC_FIELD_PATH).getValue());

        //patch contains full new document for replacement
        if (updateDoc.containsKey(DBCollection.ID_FIELD_NAME)) {
            BsonDocument filterDoc = new BsonDocument(DBCollection.ID_FIELD_NAME,
                    updateDoc.get(DBCollection.ID_FIELD_NAME));
            return new ReplaceOneModel<>(filterDoc, updateDoc, UPDATE_OPTIONS);
        }

        //patch contains idempotent change only to update original document with;
        //the target document is identified via the key doc's id field
        BsonDocument keyDoc = doc.getKeyDoc().orElseThrow(
                () -> new DataException("error: key doc must not be missing for update operation"));

        BsonDocument filterDoc = BsonDocument.parse("{" + DBCollection.ID_FIELD_NAME + ":"
                + keyDoc.getString(MongoDbHandler.JSON_ID_FIELD_PATH).getValue() + "}");

        return new UpdateOneModel<>(filterDoc, updateDoc);

    } catch (DataException exc) {
        // Already a meaningful connect exception: rethrow as-is. Do NOT
        // printStackTrace() here — it dumps to stderr, bypassing the logging
        // framework, and duplicates what the rethrow already surfaces.
        throw exc;
    } catch (Exception exc) {
        // Wrap with the original message AND cause so the full chain is preserved.
        throw new DataException(exc.getMessage(), exc);
    }

}

From source file:at.grahsl.kafka.connect.mongodb.cdc.debezium.rdbms.RdbmsInsert.java

License:Apache License

@Override
public WriteModel<BsonDocument> perform(SinkDocument doc) {

    // Both key and value docs are required to build the CREATE write model.
    BsonDocument keyDoc = doc.getKeyDoc()
            .orElseThrow(() -> new DataException("error: key doc must not be missing for insert operation"));

    BsonDocument valueDoc = doc.getValueDoc()
            .orElseThrow(() -> new DataException("error: value doc must not be missing for insert operation"));

    try {
        // Delegate filter and document construction to the handler, then replace by filter.
        BsonDocument filterDoc = RdbmsHandler.generateFilterDoc(keyDoc, valueDoc, OperationType.CREATE);
        BsonDocument upsertDoc = RdbmsHandler.generateUpsertOrReplaceDoc(keyDoc, valueDoc, filterDoc);
        return new ReplaceOneModel<>(filterDoc, upsertDoc, UPDATE_OPTIONS);
    } catch (Exception exc) {
        // Any handler failure becomes a connect DataException carrying the cause.
        throw new DataException(exc);
    }

}

From source file:at.grahsl.kafka.connect.mongodb.cdc.debezium.rdbms.RdbmsUpdate.java

License:Apache License

@Override
public WriteModel<BsonDocument> perform(SinkDocument doc) {

    // Both key and value docs are required to build the UPDATE write model.
    BsonDocument keyDoc = doc.getKeyDoc()
            .orElseThrow(() -> new DataException("error: key doc must not be missing for update operation"));

    BsonDocument valueDoc = doc.getValueDoc()
            .orElseThrow(() -> new DataException("error: value doc must not be missing for update operation"));

    try {
        // Delegate filter and document construction to the handler, then replace by filter.
        BsonDocument filterDoc = RdbmsHandler.generateFilterDoc(keyDoc, valueDoc, OperationType.UPDATE);
        BsonDocument replaceDoc = RdbmsHandler.generateUpsertOrReplaceDoc(keyDoc, valueDoc, filterDoc);
        return new ReplaceOneModel<>(filterDoc, replaceDoc, UPDATE_OPTIONS);
    } catch (Exception exc) {
        // Any handler failure becomes a connect DataException carrying the cause.
        throw new DataException(exc);
    }

}

From source file:com.everydots.kafka.connect.mongodb.cdc.debezium.mysql.MysqlInsert.java

License:Apache License

@Override
public WriteModel<BsonDocument> perform(SinkDocument doc) {

    // Key and value docs must both be present to construct the CREATE model.
    BsonDocument keyDoc = doc.getKeyDoc()
            .orElseThrow(() -> new DataException("error: key doc must not be missing for insert operation"));

    BsonDocument valueDoc = doc.getValueDoc()
            .orElseThrow(() -> new DataException("error: value doc must not be missing for insert operation"));

    try {
        // Handler derives the filter and the replacement document; replace keyed on the filter.
        BsonDocument filterDoc = MysqlHandler.generateFilterDoc(keyDoc, valueDoc, OperationType.CREATE);
        BsonDocument upsertDoc = MysqlHandler.generateUpsertOrReplaceDoc(keyDoc, valueDoc, filterDoc);
        return new ReplaceOneModel<>(filterDoc, upsertDoc, UPDATE_OPTIONS);
    } catch (Exception exc) {
        // Wrap every handler failure in a connect DataException with cause attached.
        throw new DataException(exc);
    }

}

From source file:com.everydots.kafka.connect.mongodb.cdc.debezium.mysql.MysqlUpdate.java

License:Apache License

@Override
public WriteModel<BsonDocument> perform(SinkDocument doc) {

    // Key and value docs must both be present to construct the UPDATE model.
    BsonDocument keyDoc = doc.getKeyDoc()
            .orElseThrow(() -> new DataException("error: key doc must not be missing for update operation"));

    BsonDocument valueDoc = doc.getValueDoc()
            .orElseThrow(() -> new DataException("error: value doc must not be missing for update operation"));

    try {
        // Handler derives the filter and the replacement document; replace keyed on the filter.
        BsonDocument filterDoc = MysqlHandler.generateFilterDoc(keyDoc, valueDoc, OperationType.UPDATE);
        BsonDocument replaceDoc = MysqlHandler.generateUpsertOrReplaceDoc(keyDoc, valueDoc, filterDoc);
        return new ReplaceOneModel<>(filterDoc, replaceDoc, UPDATE_OPTIONS);
    } catch (Exception exc) {
        // Wrap every handler failure in a connect DataException with cause attached.
        throw new DataException(exc);
    }

}

From source file:com.everydots.kafka.connect.mongodb.MongoDbSinkTask.java

License:Apache License

private List<? extends WriteModel<BsonDocument>> buildWriteModel(Collection<SinkRecord> records) {

    // One ReplaceOneModel per record that still has a value doc after post-processing.
    List<ReplaceOneModel<BsonDocument>> writeModels = new ArrayList<>(records.size());

    for (SinkRecord record : records) {
        SinkDocument doc = sinkConverter.convert(record);
        processorChain.process(doc, record);
        // Records whose value doc is absent after the processor chain are skipped.
        doc.getValueDoc().ifPresent(valueDoc -> {
            BsonDocument filterDoc =
                    new BsonDocument(DBCollection.ID_FIELD_NAME, valueDoc.get(DBCollection.ID_FIELD_NAME));
            writeModels.add(new ReplaceOneModel<>(filterDoc, valueDoc, UPDATE_OPTIONS));
        });
    }

    return writeModels;
}

From source file:com.hurence.logisland.service.mongodb.MongoDBUpdater.java

License:Apache License

@Override
public void run() {
    // Buffer of (converted document, filter) pairs awaiting the next flush.
    List<Tuple<Document, Bson>> batchBuffer = new ArrayList<>();

    // Poll-convert-flush loop; exits only on interruption.
    while (true) {
        try {
            // Block up to flushInterval ms for the next record so a sparse
            // stream still flushes on time.
            Tuple<Record, Bson> record = records.poll(flushInterval, TimeUnit.MILLISECONDS);
            if (record != null) {
                batchBuffer.add(new Tuple<>(RecordConverter.convert(record.getKey()), record.getValue()));
            }
            long currentTS = System.nanoTime();
            // Flush when the interval elapsed (ms -> ns) or the batch is full.
            if (batchBuffer.size() > 0
                    && ((currentTS - lastTS) >= flushInterval * 1000000 || batchBuffer.size() >= batchSize)) {
                //use moustache operator to avoid composing strings when not needed
                logger.debug("committing {} records to Mongo after {} ns", batchBuffer.size(),
                        (currentTS - lastTS));

                if (MongoDBControllerService.BULK_MODE_UPSERT.getValue().equals(bulkMode)) {
                    ReplaceOptions replaceOptions = new ReplaceOptions().upsert(true);
                    //split batches by 500 document each max
                    for (int i = 0; i < batchBuffer.size(); i += 500) {
                        try {
                            col.bulkWrite(
                                    batchBuffer.stream().skip(i).limit(500)
                                            .map(document -> new ReplaceOneModel<>(document.getValue(),
                                                    document.getKey(), replaceOptions))
                                            .collect(Collectors.toList()));
                        } catch (MongoBulkWriteException bwe) {
                            // 11000 = duplicate key; expected under upsert races, so only
                            // warn on other error codes.
                            bwe.getWriteErrors().forEach(error -> {
                                if (error.getCode() != 11000) {
                                    logger.warn("MongoDB updater got error: {}", error);
                                }
                            });
                        }
                    }
                } else {
                    // Plain insert mode: filters are irrelevant, write the documents only.
                    col.insertMany(batchBuffer.stream().map(Tuple::getKey).collect(Collectors.toList()));
                }
                lastTS = currentTS;
                batchBuffer = new ArrayList<>();
            }

        } catch (InterruptedException e) {
            // Restore the interrupt flag so owners of this thread can observe it,
            // then exit the loop.
            Thread.currentThread().interrupt();
            logger.info("Interrupted while waiting: {}", e.getMessage());
            break;
        } catch (Exception e) {
            // Unrecoverable for this batch: drop it and reset the timer so the
            // loop can keep servicing subsequent records.
            logger.error("Unrecoverable error from MongoDB updater. Losing data!", e);
            batchBuffer.clear();
            lastTS = System.nanoTime();
        }
    }
}

From source file:com.streamsets.pipeline.stage.destination.mongodb.MongoDBTarget.java

License:Apache License

@Override
public void write(Batch batch) throws StageException {
    Iterator<Record> records = batch.getRecords();
    // documentList and recordList are kept in step so a bulk-write failure can be
    // reported against every record that contributed a write model.
    List<WriteModel<Document>> documentList = new ArrayList<>();
    List<Record> recordList = new ArrayList<>();
    while (records.hasNext()) {
        Record record = records.next();
        try {
            // Serialize the record to JSON bytes, then parse into a BSON Document.
            // NOTE(review): new String(byte[]) uses the platform default charset —
            // presumably the generator emits UTF-8; confirm and pass an explicit charset.
            ByteArrayOutputStream baos = new ByteArrayOutputStream(DEFAULT_CAPACITY);
            DataGenerator generator = generatorFactory.getGenerator(baos);
            generator.write(record);
            generator.close();
            Document document = Document.parse(new String(baos.toByteArray()));

            //create a write model based on record header
            if (!record.getHeader().getAttributeNames().contains(OPERATION_KEY)) {
                LOG.error(Errors.MONGODB_15.getMessage(), record.getHeader().getSourceId());
                throw new OnRecordErrorException(Errors.MONGODB_15, record.getHeader().getSourceId());
            }

            String operation = record.getHeader().getAttribute(OPERATION_KEY);
            switch (operation) {
            case INSERT:
                documentList.add(new InsertOneModel<>(document));
                recordList.add(record);
                break;
            case UPSERT:
                // Upsert requires the configured unique key to be present on the record;
                // the replace filters on that key (leading slash stripped for the field name).
                validateUniqueKey(operation, record);
                recordList.add(record);
                documentList.add(new ReplaceOneModel<>(
                        new Document(removeLeadingSlash(mongoTargetConfigBean.uniqueKeyField),
                                record.get(mongoTargetConfigBean.uniqueKeyField).getValueAsString()),
                        document, new UpdateOptions().upsert(true)));
                break;
            case DELETE:
                recordList.add(record);
                documentList.add(new DeleteOneModel<Document>(document));
                break;
            default:
                // Unknown operation header: hard stage error, not a per-record error.
                LOG.error(Errors.MONGODB_14.getMessage(), operation, record.getHeader().getSourceId());
                throw new StageException(Errors.MONGODB_14, operation, record.getHeader().getSourceId());
            }
        } catch (IOException | StageException e) {
            // Per-record failures are routed to the configured error handler
            // (which may rethrow, discard, or send to an error stream).
            errorRecordHandler.onError(new OnRecordErrorException(record, Errors.MONGODB_13, e.toString(), e));
        }
    }

    if (!documentList.isEmpty()) {
        try {
            BulkWriteResult bulkWriteResult = coll.bulkWrite(documentList);
            if (bulkWriteResult.wasAcknowledged()) {
                LOG.trace("Wrote batch with {} inserts, {} updates and {} deletes",
                        bulkWriteResult.getInsertedCount(), bulkWriteResult.getModifiedCount(),
                        bulkWriteResult.getDeletedCount());
            }
        } catch (MongoException e) {
            // Bulk failure: report the same error against every record in the batch,
            // since the driver does not tell us here which model failed.
            for (Record record : recordList) {
                errorRecordHandler
                        .onError(new OnRecordErrorException(record, Errors.MONGODB_17, e.toString(), e));
            }
        }
    }
}

From source file:io.mandrel.cluster.node.impl.MongoNodeRepository.java

License:Apache License

@Override
public void update(List<Node> nodes) {
    // No-op for null/empty input; otherwise upsert every node keyed on its id.
    if (CollectionUtils.isNotEmpty(nodes)) {
        collection.bulkWrite(nodes.stream().map(node -> {
            final Document filter = new Document("_id", node.getId());
            final Document replacement;
            try {
                // Serialize the node to JSON and re-parse as a BSON Document.
                replacement = Document.parse(mapper.writeValueAsString(node));
            } catch (Exception e) {
                // Serialization failure is unexpected: propagate as unchecked.
                throw Throwables.propagate(e);
            }
            return new ReplaceOneModel<Document>(filter, replacement, new UpdateOptions().upsert(true));
        }).collect(Collectors.toList()));
    }
}