Example usage for com.mongodb WriteConcern SAFE

List of usage examples for com.mongodb WriteConcern SAFE

Introduction

On this page you can find example usage for com.mongodb WriteConcern SAFE.

Prototype

WriteConcern SAFE

Click the Source Link below to view the source code for com.mongodb WriteConcern SAFE.

Click Source Link

Document

Write operations that use this write concern will wait for acknowledgement from the primary server before returning.

Usage

From source file:com.redhat.lightblue.crud.mongo.MongoLocking.java

License:Open Source License

/**
 * Tries to insert the lock record for the given resource.
 *
 * @return true if the record was inserted, false if a lock record
 * already exists for the resource. Any other failure propagates as an
 * exception.
 */
private boolean acquire(String callerId, String resourceId, Long ttl, Date now, Date expiration) {
    // Build the complete lock document; the unique index on RESOURCEID
    // makes this insert act as an atomic acquire.
    BasicDBObject lockDoc = new BasicDBObject();
    lockDoc.append(CALLERID, callerId);
    lockDoc.append(RESOURCEID, resourceId);
    lockDoc.append(TIMESTAMP, now);
    lockDoc.append(TTL, ttl);
    lockDoc.append(EXPIRATION, expiration);
    lockDoc.append(COUNT, 1);
    lockDoc.append(VERSION, 1);

    LOGGER.debug("insert: {}", lockDoc);
    try {
        coll.insert(lockDoc, WriteConcern.SAFE);
        return true;
    } catch (MongoException.DuplicateKey e) {
        // Someone else already holds a lock record for this resource
        return false;
    }
}

From source file:com.redhat.lightblue.crud.mongo.MongoLocking.java

License:Open Source License

/**
 * Attempt to acquire a lock. If successful, return true, otherwise return false.
 *
 * @param callerId identifies the caller that will own the lock
 * @param resourceId the resource to lock; at most one lock record per
 * resource exists in the db (enforced by a unique index on resourceId)
 * @param ttl lock time-to-live in milliseconds; if null, defaultTTL is used
 * @return true if the lock was acquired (or re-entered by the same
 * caller, incrementing its count), false otherwise
 */
public boolean acquire(String callerId, String resourceId, Long ttl) {
    /*
      Creating an atomic acquire() method in mongodb is not
      easy. The key is to use the uniqueness of a unique index, in
      this case, a unique index on resourceId. There can be at
      most one lock record for a resource in the db at any given
      time.

      The insertion operation is atomic: if it is successful, we
      acquire the lock. We assume the update operation is
      transactional: once a document is found to be matching to
      the query of the update operation, it is locked, and no
      other caller can modify that document until our update
      operation is complete.

      We will use a version number to make sure we are updating
      the correct doc.
     */
    LOGGER.debug("acquire({}/{},ttl={})", callerId, resourceId, ttl);
    // Try to insert doc
    Date now = new Date();
    Date expiration;
    if (ttl == null)
        ttl = defaultTTL;
    expiration = new Date(now.getTime() + ttl);
    LOGGER.debug("{}/{}: lock will expire on {}", callerId, resourceId, expiration);
    BasicDBObject query;
    BasicDBObject update;
    WriteResult wr;
    int readVer = -1;
    String readCallerId = null;
    int readCount = -1;
    boolean locked = acquire(callerId, resourceId, ttl, now, expiration);
    if (!locked) {
        // At this point, we can add "if expired" predicate to the
        // queries to filter expired locks, but it is not safe to
        // rely on timestamps. Not all nodes have the same
        // timestamp, and any node can wait an arbitrary amount of
        // time at any point. So, we read the existing lock at
        // this point, and use the version number for all the
        // updates. if anybody updates the lock before we do, the
        // version number will change, and we will fail.
        query = new BasicDBObject(RESOURCEID, resourceId);
        LOGGER.debug("find: {}", query);
        DBObject lockObject = coll.findOne(query);
        if (lockObject == null) {
            // The lock vanished between our failed insert and this read:
            // the previous owner released it. Retry the insert once.
            LOGGER.debug("{}/{}: lock cannot be read. Retrying to acquire", callerId, resourceId);
            locked = acquire(callerId, resourceId, ttl, now, expiration);
            LOGGER.debug("{}/{}: acquire result: {}", callerId, resourceId, locked);
            // No need to continue here. If insertion fails, that means someone else inserted a record
            return locked;
        }
        // Snapshot the lock's version/owner/count for the optimistic updates below
        readVer = ((Number) lockObject.get(VERSION)).intValue();
        readCallerId = (String) lockObject.get(CALLERID);
        readCount = ((Number) lockObject.get(COUNT)).intValue();

        // Lock already exists
        // Possibilities:
        //  - lock is not expired, but ours : increment count
        //  - lock is not expired, but someone else owns it : fail
        //  - lock is expired : attempt to acquire
        //  - lock count is less than 1 : attempt to acquire

        // lock is not expired and we own it: increment lock count
        LOGGER.debug("{}/{} locked, assuming lock is ours, attempting to increment lock count", callerId,
                resourceId);
        if (readCallerId.equals(callerId)) {
            // Re-entrant acquire: match our own unexpired lock at the version
            // we read; bump version and count, refresh timestamp/expiration
            query = new BasicDBObject().append(CALLERID, callerId).append(RESOURCEID, resourceId)
                    .append(EXPIRATION, new BasicDBObject("$gt", now)).append(VERSION, readVer);
            update = new BasicDBObject()
                    .append("$set",
                            new BasicDBObject(TIMESTAMP, now).append(EXPIRATION, expiration).append(TTL, ttl))
                    .append("$inc", new BasicDBObject(VERSION, 1).append(COUNT, 1));
            LOGGER.debug("update: {} {}", query, update);
            wr = coll.update(query, update, false, false, WriteConcern.SAFE);
            // getN()==1 means exactly our versioned document was updated
            if (wr.getN() == 1) {
                LOGGER.debug("{}/{} locked again", callerId, resourceId);
                locked = true;
            }
        }
    }
    if (!locked) {
        // assume lock is expired or count <=0, and try to acquire it
        LOGGER.debug("{}/{} lock is expired or count <= 0, attempting to reacquire expired lock", callerId,
                resourceId);
        // Take over a dead lock: expired, or released down to count<=0.
        // The version predicate guards against concurrent takeovers.
        query = new BasicDBObject().append(RESOURCEID, resourceId)
                .append("$or",
                        Arrays.asList(new BasicDBObject(EXPIRATION, new BasicDBObject("$lte", now)),
                                new BasicDBObject(COUNT, new BasicDBObject("$lte", 0))))
                .append(VERSION, readVer);
        update = new BasicDBObject()
                .append("$set",
                        new BasicDBObject(CALLERID, callerId).append(TIMESTAMP, now)
                                .append(EXPIRATION, expiration).append(TTL, ttl).append(COUNT, 1))
                .append("$inc", new BasicDBObject(VERSION, 1));
        LOGGER.debug("update: {} {}", query, update);
        wr = coll.update(query, update, false, false, WriteConcern.SAFE);
        if (wr.getN() == 1) {
            LOGGER.debug("{}/{} locked", callerId, resourceId);
            locked = true;
        }
    }
    LOGGER.debug("{}/{}: {}", callerId, resourceId, locked ? "locked" : "not locked");
    return locked;
}

From source file:com.redhat.lightblue.crud.mongo.MongoLocking.java

License:Open Source License

/**
 * Release the lock. Returns true if the lock is released by this call.
 * If the caller holds the lock re-entrantly (count greater than 1), the
 * count is decremented instead and false is returned.
 *
 * @throws InvalidLockException if the caller no longer holds a live
 * (unexpired) lock on the resource, or a concurrent update raced us
 */
public boolean release(String callerId, String resourceId) {
    LOGGER.debug("release({}/{})", callerId, resourceId);
    Date now = new Date();
    // If lock count is only one, we can remove the lock
    BasicDBObject query = new BasicDBObject().append(CALLERID, callerId).append(RESOURCEID, resourceId)
            .append(EXPIRATION, new BasicDBObject("$gt", now)).append(COUNT, 1);
    LOGGER.debug("remove {}", query);
    WriteResult wr = coll.remove(query, WriteConcern.SAFE);
    // getN()==1 means the document matched and was removed: fully released
    if (wr.getN() == 1) {
        LOGGER.debug("{}/{} released", callerId, resourceId);
        return true;
    }
    // Retrieve the lock
    query = new BasicDBObject(RESOURCEID, resourceId).append(CALLERID, callerId);
    DBObject lock = coll.findOne(query);
    if (lock != null) {
        long ttl = ((Number) lock.get(TTL)).longValue();
        Date expiration = new Date(now.getTime() + ttl);
        int ver = ((Number) lock.get(VERSION)).intValue();
        // Try decrementing the lock count of our lock. Matching on the
        // version we just read makes this fail if anyone else updated
        // the lock concurrently.
        query = new BasicDBObject().append(CALLERID, callerId).append(RESOURCEID, resourceId)
                .append(EXPIRATION, new BasicDBObject("$gt", now)).append(COUNT, new BasicDBObject("$gt", 0))
                .append(VERSION, ver);
        BasicDBObject update = new BasicDBObject()
                .append("$set",
                        new BasicDBObject(EXPIRATION, expiration).append(TTL, ttl).append(TIMESTAMP, now))
                .append("$inc", new BasicDBObject(COUNT, -1).append(VERSION, 1));
        wr = coll.update(query, update, false, false, WriteConcern.SAFE);
        if (wr.getN() == 1) {
            LOGGER.debug("{}/{} lock count decremented, still locked", callerId, resourceId);
            return false;
        }
    }
    // Both attempts failed, Lock is no longer owned by us
    throw new InvalidLockException(resourceId);
}

From source file:com.redhat.lightblue.crud.mongo.MongoLocking.java

License:Open Source License

/**
 * Refreshes the caller's lock on the resource: extends the expiration
 * by the lock's stored TTL and bumps the version number.
 *
 * @throws InvalidLockException if the caller does not hold a live
 * (unexpired, count greater than 0) lock on the resource, or the lock was
 * updated concurrently between our read and our update
 */
public void ping(String callerId, String resourceId) {
    Date now = new Date();
    // Match only a live lock owned by this caller
    BasicDBObject q = new BasicDBObject().append(CALLERID, callerId).append(RESOURCEID, resourceId)
            .append(EXPIRATION, new BasicDBObject("$gt", now)).append(COUNT, new BasicDBObject("$gt", 0));
    DBObject lock = coll.findOne(q);
    if (lock != null) {
        // New expiration is now + the TTL stored in the lock record
        Date expiration = new Date(now.getTime() + ((Number) lock.get(TTL)).longValue());
        int ver = ((Number) lock.get(VERSION)).intValue();
        BasicDBObject update = new BasicDBObject()
                .append("$set", new BasicDBObject(TIMESTAMP, now).append(EXPIRATION, expiration))
                .append("$inc", new BasicDBObject(VERSION, 1));
        // Add the version we read to the query: a concurrent update
        // changes the version and makes this update match nothing
        q = q.append(VERSION, ver);
        WriteResult wr = coll.update(q, update, false, false, WriteConcern.SAFE);
        if (wr.getN() != 1)
            throw new InvalidLockException(resourceId);
        LOGGER.debug("{}/{} pinged", callerId, resourceId);
    } else
        throw new InvalidLockException(resourceId);
}

From source file:com.redhat.lightblue.crud.mongo.MongoSequenceGenerator.java

License:Open Source License

/**
 * Atomically increments and returns the sequence value. If this
 * is the first use of the sequence, the sequence is created.
 *
 * @param name The sequence name
 * @param init The initial value of the sequence. Used only if
 * the sequence does not exist prior to this call
 * @param inc The increment. Could be negative or positive. If 0,
 * it is assumed to be 1. Used only if the sequence does not exist
 * prior to this call
 *
 * If the sequence already exists, the <code>init</code> and
 * <code>inc</code> are ignored.
 *
 * @return The value of the sequence before the call
 * @throws RuntimeException if the sequence document cannot be read or
 * updated (e.g. it was removed concurrently)
 */
public long getNextSequenceValue(String name, long init, long inc) {
    LOGGER.debug("getNextSequenceValue({})", name);
    // Read the sequence document
    BasicDBObject q = new BasicDBObject(NAME, name);
    DBObject doc = coll.findOne(q);
    if (doc == null) {
        // Sequence document does not exist. Insert a new document using the init and inc
        LOGGER.debug("inserting sequence record name={}, init={}, inc={}", name, init, inc);
        if (inc == 0)
            inc = 1;
        // Here, we also make sure we have the indexes setup properly
        initIndex();
        BasicDBObject u = new BasicDBObject().append(NAME, name).append(INIT, init).append(INC, inc)
                .append(VALUE, init);
        try {
            coll.insert(u, WriteConcern.SAFE);
        } catch (Exception e) {
            // Best-effort: someone else might have inserted already, try to re-read
            LOGGER.debug("Insertion failed with {}, trying to read", e);
        }
        doc = coll.findOne(q);
        if (doc == null)
            throw new RuntimeException("Cannot generate value for " + name);
    }
    LOGGER.debug("Sequence doc={}", doc);
    // Read through Number: the stored value may be an Integer or a Long
    // depending on how the document was created (consistent with the
    // other readers in this file)
    long increment = ((Number) doc.get(INC)).longValue();
    BasicDBObject u = new BasicDBObject().append("$inc", new BasicDBObject(VALUE, increment));
    // This call returns the unmodified (pre-increment) document
    doc = coll.findAndModify(q, u);
    if (doc == null) {
        // The sequence document disappeared between the read and the update
        throw new RuntimeException("Cannot generate value for " + name);
    }
    long l = ((Number) doc.get(VALUE)).longValue();
    LOGGER.debug("{} -> {}", name, l);
    return l;
}

From source file:com.redhat.lightblue.metadata.mongo.MongoMetadata.java

License:Open Source License

/**
 * Stores brand-new metadata: writes the entity info and the entity
 * schema as two separate documents. If the schema write fails, the
 * already-inserted info document is rolled back (removed).
 *
 * @param md the metadata to create; must have a name, fields, a valid
 * data store and a valid version
 * @throws Error on validation failure, duplicate metadata, or a
 * database error
 */
@Override
public void createNewMetadata(EntityMetadata md) {
    LOGGER.debug("createNewMetadata: begin");
    checkMetadataHasName(md);
    checkMetadataHasFields(md);
    checkDataStoreIsValid(md);
    Version ver = checkVersionIsValid(md);
    LOGGER.debug("createNewMetadata: version {}", ver);

    Error.push("createNewMetadata(" + md.getName() + ")");

    // write info and schema as separate docs!
    try {
        if (md.getEntityInfo().getDefaultVersion() != null) {
            if (!md.getEntityInfo().getDefaultVersion().equals(ver.getValue())) {
                validateDefaultVersion(md.getEntityInfo());
            }
            if (md.getStatus() == MetadataStatus.DISABLED) {
                throw Error.get(MongoMetadataConstants.ERR_DISABLED_DEFAULT_VERSION,
                        md.getName() + ":" + md.getEntityInfo().getDefaultVersion());
            }
        }
        LOGGER.debug("createNewMetadata: Default version validated");
        PredefinedFields.ensurePredefinedFields(md);
        DBObject infoObj = (DBObject) mdParser.convert(md.getEntityInfo());
        DBObject schemaObj = (DBObject) mdParser.convert(md.getEntitySchema());

        Error.push("writeEntity");
        try {
            try {
                WriteResult result = new InsertCommand(null, collection, infoObj, WriteConcern.SAFE).execute();
                LOGGER.debug("Inserted entityInfo");
                String error = result.getError();
                if (error != null) {
                    // Parameterized logging: the original concatenated the
                    // error onto the format string, leaving "{}" unfilled
                    LOGGER.error("createNewMetadata: error in createInfo: {}", error);
                    throw Error.get(MongoMetadataConstants.ERR_DB_ERROR, error);
                }
            } catch (MongoException.DuplicateKey dke) {
                LOGGER.error("createNewMetadata: duplicateKey {}", dke);
                throw Error.get(MongoMetadataConstants.ERR_DUPLICATE_METADATA, ver.getValue());
            }
            try {
                WriteResult result = new InsertCommand(null, collection, schemaObj, WriteConcern.SAFE)
                        .execute();
                String error = result.getError();
                if (error != null) {
                    LOGGER.error("createNewMetadata: error in createSchema: {}", error);
                    // Roll back the entity info document inserted above
                    new RemoveCommand(null, collection, new BasicDBObject(LITERAL_ID, infoObj.get(LITERAL_ID)))
                            .execute();
                    throw Error.get(MongoMetadataConstants.ERR_DB_ERROR, error);
                }
                createUpdateEntityInfoIndexes(md.getEntityInfo());
            } catch (MongoException.DuplicateKey dke) {
                LOGGER.error("createNewMetadata: duplicateKey {}", dke);
                // Roll back the entity info document inserted above
                new RemoveCommand(null, collection, new BasicDBObject(LITERAL_ID, infoObj.get(LITERAL_ID)))
                        .execute();
                throw Error.get(MongoMetadataConstants.ERR_DUPLICATE_METADATA, ver.getValue());
            }
        } finally {
            Error.pop();
        }
    } catch (RuntimeException e) {
        LOGGER.error("createNewMetadata", e);
        throw e;
    } finally {
        Error.pop();
    }
    LOGGER.debug("createNewMetadata: end");
}

From source file:com.redhat.lightblue.metadata.mongo.MongoMetadata.java

License:Open Source License

/**
 * Creates a new schema (versioned data) for an existing metadata.
 *
 * @param md/*  ww  w  . jav a  2s  .  co m*/
 */
@Override
public void createNewSchema(EntityMetadata md) {
    checkMetadataHasName(md);
    checkMetadataHasFields(md);
    checkDataStoreIsValid(md);
    Version ver = checkVersionIsValid(md);

    Error.push("createNewSchema(" + md.getName() + ")");

    try {
        // verify entity info exists
        EntityInfo info = getEntityInfo(md.getName());

        if (null == info) {
            throw Error.get(MongoMetadataConstants.ERR_MISSING_ENTITY_INFO, md.getName());
        }

        PredefinedFields.ensurePredefinedFields(md);
        DBObject schemaObj = (DBObject) mdParser.convert(md.getEntitySchema());

        WriteResult result = new InsertCommand(null, collection, schemaObj, WriteConcern.SAFE).execute();
        String error = result.getError();
        if (error != null) {
            throw Error.get(MongoMetadataConstants.ERR_DB_ERROR, error);
        }
    } catch (MongoException.DuplicateKey dke) {
        throw Error.get(MongoMetadataConstants.ERR_DUPLICATE_METADATA, ver.getValue());
    } finally {
        Error.pop();
    }
}

From source file:com.redhat.mongotx.TransactionLogger.java

License:Open Source License

/**
 * Inserts a record into the transaction collection, and returns
 * the new transaction id/*from   w  w  w.  j av a  2  s.  com*/
 */
public String startTransaction() {
    Date date = new Date();
    BasicDBObject txDoc = new BasicDBObject().append(STATE, TxState.active.toString()).append(STARTED, date)
            .append(LAST_TOUCHED, date);
    txCollection.insert(txDoc, WriteConcern.SAFE);
    return txDoc.get(ID).toString();
}

From source file:com.softwear.plugins.mongodb.MongoDBScriptObject.java

License:BSD License

/**
 * Maps a symbolic write-concern name to the corresponding
 * {@link WriteConcern} constant. Unrecognized names fall back to
 * {@code WriteConcern.UNACKNOWLEDGED}.
 */
public WriteConcern js_getMongoWriteConcern(String _type) {
    if (_type.equals("none") || _type.equals("unacknowledge")) {
        return WriteConcern.UNACKNOWLEDGED;
    }
    if (_type.equals("normal")) {
        return WriteConcern.NORMAL;
    }
    if (_type.equals("safe")) {
        return WriteConcern.SAFE;
    }
    if (_type.equals("replicas_safe")) {
        return WriteConcern.REPLICAS_SAFE;
    }
    if (_type.equals("fsync_safe")) {
        return WriteConcern.FSYNC_SAFE;
    }
    // Default for unknown names, matching the "none" case
    return WriteConcern.UNACKNOWLEDGED;
}

From source file:com.stratio.ingestion.morphline.checkpointfilter.handler.MongoCheckpointFilterHandler.java

License:Apache License

/**
 * Opens the MongoDB connection described by the given URI and caches
 * the client, database, and collection handles.
 *
 * @param mongoUri a mongodb:// connection string; may include a
 * database and a collection path
 * @throws IllegalStateException if the URI references an unknown host
 */
private void initMongo(String mongoUri) {
    // All writes through this client wait for primary acknowledgement
    this.mongoClientURI = new MongoClientURI(mongoUri,
            MongoClientOptions.builder().writeConcern(WriteConcern.SAFE));
    try {
        this.mongoClient = new MongoClient(mongoClientURI);
    } catch (UnknownHostException e) {
        // Fail fast with the cause preserved. The original printStackTrace()
        // swallowed the error and continued with a null client, guaranteeing
        // an NPE on first use. (URI omitted from the message: it may carry
        // credentials.)
        throw new IllegalStateException("Cannot connect to MongoDB: unknown host", e);
    }
    if (mongoClientURI.getDatabase() != null) {
        this.mongoDb = mongoClient.getDB(mongoClientURI.getDatabase());
    }
    if (mongoClientURI.getCollection() != null) {
        this.mongoCollection = mongoDb.getCollection(mongoClientURI.getCollection());
    }
}