Example usage for com.mongodb CursorType TailableAwait

List of usage examples for com.mongodb CursorType TailableAwait

Introduction

This page lists example usages of com.mongodb CursorType.TailableAwait.

Prototype

CursorType TailableAwait

Document

A tailable cursor with a built-in server sleep before returning an empty batch.
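
A minimal sketch of tailing a capped collection with this cursor type (the database and collection names are illustrative, the collection must be capped, and the calls follow the 3.x driver API used in the examples below):

import com.mongodb.CursorType;
import com.mongodb.MongoClient;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoCursor;
import org.bson.Document;

public class TailableAwaitExample {
    public static void main(String[] args) {
        MongoClient client = new MongoClient(); // connects to localhost:27017
        // "test.events" is assumed to be a capped collection; tailable cursors
        // are rejected on regular collections.
        MongoCollection<Document> events = client.getDatabase("test").getCollection("events");
        try (MongoCursor<Document> cursor =
                events.find().cursorType(CursorType.TailableAwait).iterator()) {
            // With TailableAwait, hasNext() blocks while the server awaits new
            // data instead of immediately returning an empty batch.
            while (cursor.hasNext()) {
                System.out.println(cursor.next().toJson());
            }
        } finally {
            client.close();
        }
    }
}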

Usage

From source file:com.eightkdata.mongowp.client.wrapper.MongoConnectionWrapper.java

License:Open Source License

private CursorType toCursorType(QueryOptions queryOptions) {
    if (!queryOptions.isTailable()) {
        return CursorType.NonTailable;
    }
    if (queryOptions.isAwaitData()) {
        return CursorType.TailableAwait;
    }
    return CursorType.Tailable;
}
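
Note the precedence in this helper: TailableAwait is returned only when the query is both tailable and awaitData, mirroring the server rule that awaitData is meaningful only on a tailable cursor.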

From source file:com.streamsets.pipeline.stage.origin.mongodb.AbstractMongoDBSource.java

License:Apache License

private void checkCursor(List<ConfigIssue> issues) {
    // Per https://docs.mongodb.com/manual/reference/method/cursor.batchSize/,
    // a batch size of 1 is special in MongoDB: it is treated as limit 1.
    // The validation queries below therefore use limit(1) alone rather than
    // combining batchSize(1) with limit(1).
    if (configBean.isCapped) {
        try {
            mongoCollection.find().cursorType(CursorType.TailableAwait).limit(1).iterator().close();
        } catch (MongoQueryException e) {
            LOG.error("Error during Mongo Query in checkCursor: {}", e);
            issues.add(getContext().createConfigIssue(Groups.MONGODB.name(),
                    MongoDBConfig.MONGO_CONFIG_PREFIX + "collection", Errors.MONGODB_04,
                    configBean.mongoConfig.collection, e.toString()));
        }
    } else {
        try {
            mongoCollection.find().cursorType(CursorType.NonTailable).limit(1).iterator().close();
        } catch (MongoQueryException e) {
            LOG.error("Error during Mongo Query in checkCursor: {}", e);
            issues.add(getContext().createConfigIssue(Groups.MONGODB.name(),
                    MongoDBConfig.MONGO_CONFIG_PREFIX + "collection", Errors.MONGODB_06,
                    configBean.mongoConfig.collection, e.toString()));
        }
    }
}

From source file:com.streamsets.pipeline.stage.origin.mongodb.MongoDBSource.java

License:Apache License

private void prepareCursor(int maxBatchSize, String offsetField, String lastSourceOffset) {
    createMongoClient();

    ObjectId offset;
    if (null == cursor) {
        if (null == lastSourceOffset || lastSourceOffset.isEmpty()) {
            offset = initialObjectId;
        } else {
            offset = new ObjectId(lastSourceOffset);
        }
        LOG.debug("Getting new cursor with params: {} {} {}", maxBatchSize, offsetField, lastSourceOffset);
        if (isCapped) {
            cursor = mongoCollection.find().filter(Filters.gt(offsetField, offset))
                    .cursorType(CursorType.TailableAwait).batchSize(maxBatchSize).iterator();
        } else {
            cursor = mongoCollection.find().filter(Filters.gt(offsetField, offset))
                    .sort(Sorts.ascending(offsetField)).cursorType(CursorType.NonTailable)
                    .batchSize(maxBatchSize).iterator();
        }
    }
}

From source file:com.streamsets.pipeline.stage.origin.mongodb.MongoDBSource.java

License:Apache License

private void checkCursor(List<ConfigIssue> issues) {
    if (isCapped) {
        try {
            mongoCollection.find().cursorType(CursorType.TailableAwait).batchSize(1).limit(1).iterator()
                    .close();
        } catch (MongoQueryException e) {
            issues.add(getContext().createConfigIssue(Groups.MONGODB.name(), "collection", Errors.MONGODB_04,
                    mongoDatabaseName, e.toString()));
        }
    } else {
        try {
            mongoCollection.find().cursorType(CursorType.NonTailable).batchSize(1).limit(1).iterator().close();
        } catch (MongoQueryException e) {
            issues.add(getContext().createConfigIssue(Groups.MONGODB.name(), "collection", Errors.MONGODB_06,
                    mongoDatabaseName, e.toString()));
        }
    }
}

From source file:es.omarall.mtc.TailingTask.java

License:Apache License

/**
 * Builds a tailable, await-data cursor to fetch documents from the
 * capped collection.
 *
 * @return a tailable cursor over the capped collection
 */
public MongoCursor<Document> buildCursor() {

    if (lastTrackedId == null) {
        return cappedCollection.find().sort(new Document("$natural", 1)).cursorType(CursorType.TailableAwait)
                .iterator();
    } else {

        // we know we processed the document with "_id": lastTrackedId
        // We are interested in the first document with id greater than
        // lastTrackedId
        return cappedCollection

                .find(Filters.gt("_id", lastTrackedId))

                .sort(new Document("$natural", 1)).cursorType(CursorType.TailableAwait).iterator();
    }
}
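
The caller is expected to record the _id of each processed document into lastTrackedId; if the tailable cursor dies, buildCursor() can then be invoked again to resume just after the last processed document.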

From source file:flipkart.mongo.replicator.node.ReplicationTask.java

License:Apache License

@Override
public void run() {
    String shardId = rsConfig.shardName;
    Node masterNode = rsConfig.getMasterNode().get();
    MongoClient client = MongoConnector.getMongoClient(Lists.newArrayList(masterNode));

    MongoDatabase database = client.getDatabase("local");
    lastCp = taskContext.checkPointHandler.getCheckPoint(shardId);

    logger.info(String.format("######## START REPLICATOR FOR MongoURI: %s. LastCheckpoint: %s #######",
            client.getAddress(), lastCp));
    MongoCollection<Document> collection = database.getCollection("oplog.rs");
    FindIterable<Document> iterable;
    MongoCursor<Document> cursor;
    do {
        if (lastCp == null) {
            iterable = collection.find();
        } else {
            iterable = collection.find(new Document("ts", new Document("$gt", lastCp)));
        }
        cursor = iterable.sort(new Document("$natural", 1)).noCursorTimeout(true)
                .cursorType(CursorType.TailableAwait).batchSize(3000).iterator();
        try {
            executeCursor(cursor);
            Thread.sleep(WAIT_FOR_NEXT_ITERATION);
        } catch (MongoCursorNotFoundException e) {
            logger.info("Cursor has been closed. About to open a new cursor. ID: "
                    + cursor.getServerCursor().getId());
        } catch (Exception e) {
            logger.error("Exception while replicating", e);
            throw new RuntimeException(e);
        } finally {
            cursor.close();
        }
    } while (true);
}

From source file:info.bunji.mongodb.synces.OplogExtractor.java

License:Apache License

@Override
protected void execute() throws Exception {

    Set<String> includeFields = config.getIncludeFields();
    Set<String> excludeFields = config.getExcludeFields();
    String index = config.getDestDbName();
    String syncName = config.getSyncName();

    // oplog tailing loop
    int checkPoint = 0;
    int retryCnt = 0;
    while (true) {
        try (MongoClient client = MongoClientService.getClient(config)) {
            retryCnt = 0;

            logger.info("[{}] starting oplog sync.", syncName);

            // check oplog timestamp outdated
            MongoCollection<Document> oplogCollection = client.getDatabase("local").getCollection("oplog.rs");
            FindIterable<Document> results;
            if (timestamp != null) {
                results = oplogCollection.find().filter(Filters.lte("ts", timestamp))
                        .sort(new Document("$natural", -1)).limit(1);
                if (results.first() == null) {
                    throw new IllegalStateException("[" + syncName + "] oplog outdated.["
                            + DocumentUtils.toDateStr(timestamp) + "(" + timestamp + ")]");
                }
                //logger.trace("[{}] start oplog timestamp = [{}]", config.getSyncName(), timestamp);

                BsonTimestamp tmpTs = results.first().get("ts", BsonTimestamp.class);
                if (!tmpTs.equals(timestamp)) {
                    // the stored checkpoint entry is no longer in the oplog;
                    // resume from the nearest earlier entry
                    timestamp = tmpTs;

                    config.setStatus(Status.RUNNING);
                    config.setLastOpTime(timestamp);
                    append(SyncOperation.fromConfig(config));
                }
            }

            // start tailing the oplog
            targetDb = client.getDatabase(config.getMongoDbName());
            results = oplogCollection.find().filter(Filters.gte("ts", timestamp))
                    .sort(new Document("$natural", 1)).cursorType(CursorType.TailableAwait)
                    .noCursorTimeout(true).oplogReplay(true);

            logger.info("[{}] started oplog sync. [oplog {} ({})]", syncName,
                    DocumentUtils.toDateStr(timestamp), timestamp);

            // get document from oplog
            for (Document oplog : results) {

                SyncOperation op = null;
                timestamp = oplog.get("ts", BsonTimestamp.class);
                if (!"c".equals(oplog.get("op"))) {
                    // not a command operation
                    String ns = oplog.getString("ns");
                    String[] nsVals = ns.split("\\.", 2);
                    if (!config.getMongoDbName().equals(nsVals[0]) || !config.isTargetCollection(nsVals[1])) {
                        if (++checkPoint >= 10000) {
                            // persist the current position as a checkpoint
                            config.setLastOpTime(timestamp);
                            op = SyncOperation.fromConfig(config);
                            checkPoint = 0; // clear check count
                            append(op);
                        }
                        continue;
                    } else {
                        op = new SyncOperation(oplog, index);
                        checkPoint = 0;
                    }
                } else {
                    // command operation
                    op = new SyncOperation(oplog, index);
                    if (!config.getMongoDbName().equals(op.getSrcDbName())
                            || !config.isTargetCollection(op.getCollection())) {
                        checkPoint++;
                        continue;
                    }
                }

                if (op.isPartialUpdate()) {
                    // get full document
                    MongoCollection<Document> collection = getMongoCollection(op.getCollection());
                    Document updateDoc = collection.find(oplog.get("o2", Document.class)).first();
                    if (updateDoc == null) {
                        checkPoint++;
                        continue; // deleted document
                    }
                    op.setDoc(updateDoc);
                }

                // filter document(insert or update)
                if (op.getDoc() != null) {
                    Document filteredDoc = DocumentUtils.applyFieldFilter(op.getDoc(), includeFields,
                            excludeFields);
                    if (filteredDoc.isEmpty()) {
                        checkPoint++;
                        continue; // no change sync fields
                    }
                    op.setDoc(filteredDoc);
                }

                // emit sync data
                append(op);
            }
        } catch (MongoClientException mce) {
            // do nothing.
        } catch (UnknownHostException | MongoSocketException mse) {
            retryCnt++;
            if (retryCnt >= MAX_RETRY) {
                logger.error(String.format("[%s] mongo connect failed. (RETRY=%d)", syncName, retryCnt), mse);
                throw mse;
            }
            long waitSec = (long) Math.min(60, Math.pow(2, retryCnt));
            logger.warn("[{}] waiting mongo connect retry. ({}/{}) [{}sec]", syncName, retryCnt, MAX_RETRY,
                    waitSec);

            Thread.sleep(waitSec * 1000);
        } catch (MongoInterruptedException mie) {
            // interrupt oplog tailable process.
            break;
        } catch (Throwable t) {
            logger.error(String.format("[%s] error. [msg:%s](%s)", syncName, t.getMessage(),
                    t.getClass().getSimpleName()), t);
            throw t;
        }
    }
}

From source file:io.debezium.connector.mongodb.Replicator.java

License:Apache License

/**
 * Use the given primary to read the oplog.
 *
 * @param primary the connection to the replica set's primary node; may not be null
 */
protected void readOplog(MongoClient primary) {
    BsonTimestamp oplogStart = source.lastOffsetTimestamp(replicaSet.replicaSetName());
    logger.info("Reading oplog for '{}' primary {} starting at {}", replicaSet, primary.getAddress(),
            oplogStart);

    // Include none of the cluster-internal operations and only those events since the previous timestamp ...
    MongoCollection<Document> oplog = primary.getDatabase("local").getCollection("oplog.rs");
    Bson filter = Filters.and(Filters.gt("ts", oplogStart), // start just after our last position
            Filters.exists("fromMigrate", false)); // skip internal movements across shards
    FindIterable<Document> results = oplog.find(filter).sort(new Document("$natural", 1)) // force forwards collection scan
            .oplogReplay(true) // tells Mongo to not rely on indexes
            .noCursorTimeout(true) // don't timeout waiting for events
            .cursorType(CursorType.TailableAwait); // tail and await new data
    // Read as much of the oplog as we can ...
    ServerAddress primaryAddress = primary.getAddress();
    try (MongoCursor<Document> cursor = results.iterator()) {
        while (running.get() && cursor.hasNext()) {
            if (!handleOplogEvent(primaryAddress, cursor.next())) {
                // Something happened, and we're supposed to stop reading
                return;
            }
        }
    }
}

From source file:io.mandrel.timeline.impl.MongoTimelineRepository.java

License:Apache License

@Override
public void pool(Listener listener) {

    LocalDateTime date = LocalDateTime.now();
    Bson query = Filters.gt("time", date);

    try {
        while (true) {
            MongoCursor<Document> cursor = timeline.find(query).cursorType(CursorType.TailableAwait).iterator();

            while (true) {
                if (!cursor.hasNext()) {
                    if (cursor.getServerCursor() == null) {
                        break;
                    }
                    continue;
                }

                Document result = cursor.next();
                try {
                    Event event = mapper.readValue(result.toJson(), Event.class);
                    date = event.getTime();

                    listener.on(event);
                } catch (Exception e) {
                    log.warn("Error while getting the event", e);
                }
            }

            query = Filters.gt("time", date);
        }
    } catch (Exception e) {
        log.warn("Event pool process is down!", e);
    }
}
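
As an alternative to the hasNext()/continue spin above, the driver's MongoCursor also exposes tryNext(), which returns null when no document is currently available. A sketch of the same loop using it (reusing the timeline, query, mapper, listener, and log fields of this class):

MongoCursor<Document> cursor = timeline.find(query).cursorType(CursorType.TailableAwait).iterator();
try {
    while (true) {
        Document result = cursor.tryNext(); // null when nothing is available yet
        if (result == null) {
            if (cursor.getServerCursor() == null) {
                break; // the server closed the cursor, e.g. the collection was dropped
            }
            continue;
        }
        try {
            Event event = mapper.readValue(result.toJson(), Event.class);
            listener.on(event);
        } catch (Exception e) {
            log.warn("Error while getting the event", e);
        }
    }
} finally {
    cursor.close();
}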

From source file:me.tfeng.play.mongodb.OplogListener.java

License:Apache License

protected CursorType getCursorType() {
    return CursorType.TailableAwait;
}