Example usage for com.mongodb DBCollection findAndModify

List of usage examples for com.mongodb DBCollection findAndModify

Introduction

On this page you can find example usages of com.mongodb DBCollection.findAndModify.

Prototype

@Nullable
public DBObject findAndModify(@Nullable final DBObject query, @Nullable final DBObject fields,
        @Nullable final DBObject sort, final boolean remove, @Nullable final DBObject update,
        final boolean returnNew, final boolean upsert) 

Document

Atomically modify and return a single document.
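
A minimal sketch of this seven-argument overload, used as an atomic upsert-and-increment counter (the legacy MongoClient/DB API is assumed, and the database, collection, and field names are hypothetical):

import com.mongodb.BasicDBObject;
import com.mongodb.DB;
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.MongoClient;

public class FindAndModifyDemo {
    public static void main(String[] args) {
        MongoClient client = new MongoClient(); // connects to localhost:27017
        try {
            DB db = client.getDB("test"); // hypothetical database name
            DBCollection counters = db.getCollection("counters");

            DBObject query = new BasicDBObject("_id", "demo");
            DBObject update = new BasicDBObject("$inc", new BasicDBObject("value", 1));

            // query, fields, sort, remove=false, update, returnNew=true, upsert=true:
            // atomically increments the counter, creating it on first use, and
            // returns the document as it looks *after* the update
            DBObject doc = counters.findAndModify(query, null, null, false, update, true, true);
            System.out.println(doc.get("value")); // prints 1, 2, 3, ... on successive runs
        } finally {
            client.close();
        }
    }
}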

Usage

From source file: backend.facades.UserController.java

protected static String getNextId(DB db, String seq_name) {
    String sequence_collection = "seq"; // the name of the sequence collection
    String sequence_field = "seq"; // the name of the field which holds the sequence

    DBCollection seq = db.getCollection(sequence_collection); // get the collection (this will create it if needed)

    if (seq == null) { // defensive: getCollection does not return null in the Java driver
        seq = db.createCollection(sequence_collection, null);
    }

    // this object represents your "query"; it's analogous to a WHERE clause in SQL
    DBObject query = new BasicDBObject();
    query.put("_id", seq_name); // where _id = the input sequence name

    // this object represents the "update" or the SET blah=blah in SQL
    DBObject change = new BasicDBObject(sequence_field, 1);
    DBObject update = new BasicDBObject("$inc", change); // $inc is the MongoDB increment operator

    // Atomically updates the sequence field and returns the value for you
    DBObject res = seq.findAndModify(query, new BasicDBObject(), new BasicDBObject(), false, update, true,
            true);
    return res.get(sequence_field).toString();
}
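
Note the flag choices in the call above: upsert=true creates the counter document on first use, and returnNew=true makes each caller see its own incremented value, so concurrent calls to getNextId(db, "users") hand out "1", "2", "3", ... without ever duplicating an id.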

From source file: com.continuent.tungsten.replicator.applier.MongoApplier.java

License: Open Source License

/**
 * {@inheritDoc}
 * 
 * @see com.continuent.tungsten.replicator.applier.RawApplier#commit()
 */
@Override
public void commit() throws ReplicatorException, InterruptedException {
    // If we don't have a last header, there is nothing to be done.
    if (latestHeader == null) {
        if (logger.isDebugEnabled())
            logger.debug("Unable to commit; last header is null");
        return;
    }

    // Connect to the schema and collection.
    DB db = m.getDB(serviceSchema);
    DBCollection trepCommitSeqno = db.getCollection("trep_commit_seqno");

    // Construct query.
    DBObject query = new BasicDBObject();
    query.put("task_id", taskId);

    // Construct update.
    BasicDBObject doc = new BasicDBObject();
    doc.put("task_id", taskId);
    doc.put("seqno", latestHeader.getSeqno());
    // (Short values appear to be stored as Integer in MongoDB.)
    doc.put("fragno", latestHeader.getFragno());
    doc.put("last_frag", latestHeader.getLastFrag());
    doc.put("source_id", latestHeader.getSourceId());
    doc.put("epoch_number", latestHeader.getEpochNumber());
    doc.put("event_id", latestHeader.getEventId());
    doc.put("extract_timestamp", latestHeader.getExtractedTstamp().getTime());

    // Update trep_commit_seqno.
    DBObject updatedDoc = trepCommitSeqno.findAndModify(query, null, null, false, doc, true, true);
    if (logger.isDebugEnabled()) {
        if (updatedDoc == null)
            logger.debug("Unable to update/insert trep_commit_seqno: query=" + query + " doc=" + doc);
        else
            logger.debug("Trep_commit_seqno updated: updatedDoc=" + updatedDoc);
    }
}
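
Note that the update document here contains no update operators, so findAndModify replaces the entire trep_commit_seqno row for this task; combined with upsert=true, the row is created on the first commit and overwritten on every subsequent one, while returnNew=true is used only to report the result when debug logging is enabled.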

From source file: com.ebay.cloud.cms.lock.mongo.MongoMutex.java

License: Apache License

private static void createIfNotExist(DBCollection coll, String lockName) {
    BasicDBObject q = new BasicDBObject();
    q.put(LOCK_NAME, lockName);

    BasicDBObject u = new BasicDBObject();
    BasicDBObject o = new BasicDBObject();
    o.put(LOCK_NAME, lockName);
    u.put(MongoOperand.set, o);

    coll.findAndModify(q, null, null, false, u, true, true);
}
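
Here findAndModify serves purely as an atomic create-if-absent: with remove=false and upsert=true, the $set either matches the existing lock document or inserts it, avoiding the race window that a separate find() followed by insert() would leave open.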

From source file: com.hangum.tadpole.mongodb.core.query.MongoDBQuery.java

License: Open Source License

/**
 * findAndModify
 * 
 * @param userDB
 * @param collName
 * @param objQuery
 * @param objSort
 * @param objFields
 * @param isRemove
 * @param objUpdate
 * @param isReturnNew
 * @param isUpsert
 * @return
 * @throws Exception
 */
public static DBObject findAndModify(UserDBDAO userDB, String collName, DBObject objQuery, DBObject objSort,
        DBObject objFields, boolean isRemove, DBObject objUpdate, boolean isReturnNew, boolean isUpsert)
        throws Exception {
    DBCollection coll = findCollection(userDB, collName);
    DBObject retDBObject = coll.findAndModify(objQuery, objFields, objSort, isRemove, objUpdate, isReturnNew,
            isUpsert);

    return retDBObject;
}
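
A hypothetical invocation of this wrapper, from code that propagates Exception (userDB and the collection name come from the surrounding Tadpole application; the comments mark which parameter each argument binds to):

DBObject before = MongoDBQuery.findAndModify(userDB, "user_coll",
        new BasicDBObject("name", "bob"),    // objQuery
        new BasicDBObject("_id", 1),         // objSort
        null,                                // objFields (null returns all fields)
        false,                               // isRemove
        new BasicDBObject("$set", new BasicDBObject("active", true)), // objUpdate
        true,                                // isReturnNew
        false);                              // isUpsert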

From source file: com.ikanow.infinit.e.processing.generic.aggregation.AssociationAggregationUtils.java

License: Open Source License

/**
 * Add events to the Elasticsearch index for events
 * and the mongodb collection
 * so they are searchable for searchsuggest
 * 
 * Step 1.a, try to just update aliases
 * Step 1.b, if that fails, create a new entry
 * 
 * Step 2, Update totalfreq and doccount
 * 
 * Step 3, After updating totalfreq and doccount, write to ES for every group
 * 
 * @param eventFeatures
 */
public static void updateEventFeatures(Map<String, Map<ObjectId, AssociationFeaturePojo>> eventFeatures) {
    // Some diagnostic counters:
    int numCacheMisses = 0;
    int numCacheHits = 0;
    int numNewAssocs = 0;
    long entityAggregationTime = new Date().getTime();

    DBCollection col = DbManager.getFeature().getAssociation();

    // (This fn is normally run for a single community id)
    CommunityFeatureCaches.CommunityFeatureCache currCache = null;

    String savedSyncTime = null;
    for (Map<ObjectId, AssociationFeaturePojo> evtCommunity : eventFeatures.values()) {

        Iterator<Map.Entry<ObjectId, AssociationFeaturePojo>> it = evtCommunity.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<ObjectId, AssociationFeaturePojo> evtFeatureKV = it.next();
            try {
                AssociationFeaturePojo evtFeature = evtFeatureKV.getValue();
                long nSavedDocCount = evtFeature.getDoccount();

                ObjectId communityID = evtFeature.getCommunityId();

                if ((null == currCache) || !currCache.getCommunityId().equals(evtFeatureKV.getKey())) {
                    currCache = CommunityFeatureCaches.getCommunityFeatureCache(evtFeatureKV.getKey());
                    if (_diagnosticMode) {
                        if (_logInDiagnosticMode)
                            System.out.println(
                                    "AssociationAggregationUtils.updateEventFeatures, Opened cache for community: "
                                            + evtFeatureKV.getKey());
                    }
                } //TESTED (by hand)               

                // Is this in our cache? If so can short cut a bunch of the DB interaction:
                AssociationFeaturePojo cachedAssoc = currCache.getCachedAssocFeature(evtFeature);
                if (null != cachedAssoc) {
                    if (_incrementalMode) {
                        if (_diagnosticMode) {
                            if (_logInDiagnosticMode)
                                System.out.println(
                                        "AssociationAggregationUtils.updateEventFeatures, skip cached: "
                                                + cachedAssoc.toDb());
                            //TODO (INF-2825): should be continue-ing here so can use delta more efficiently...
                        }
                    } else if (_diagnosticMode) {
                        if (_logInDiagnosticMode)
                            System.out
                                    .println("AssociationAggregationUtils.updateEventFeatures, grabbed cached: "
                                            + cachedAssoc.toDb());
                    }
                    numCacheHits++;
                } //TESTED (by hand)         
                else {
                    numCacheMisses++;
                }

                //try to update
                BasicDBObject query = new BasicDBObject(AssociationFeaturePojo.index_, evtFeature.getIndex());
                query.put(AssociationFeaturePojo.communityId_, communityID);

                //Step1 try to update alias
                //update arrays
                BasicDBObject multiopAliasArrays = new BasicDBObject();
                // Entity1 Alias:
                if (null != evtFeature.getEntity1_index()) {
                    evtFeature.addEntity1(evtFeature.getEntity1_index());
                }
                if (null != evtFeature.getEntity1()) {
                    if ((null == cachedAssoc) || (null == cachedAssoc.getEntity1())
                            || !cachedAssoc.getEntity1().containsAll(evtFeature.getEntity1())) {
                        BasicDBObject multiopE = new BasicDBObject(MongoDbManager.each_,
                                evtFeature.getEntity1());
                        multiopAliasArrays.put(AssociationFeaturePojo.entity1_, multiopE);
                    }
                } //TESTED (by hand)

                // Entity2 Alias:
                if (null != evtFeature.getEntity2_index()) {
                    evtFeature.addEntity2(evtFeature.getEntity2_index());
                }
                if (null != evtFeature.getEntity2()) {
                    if ((null == cachedAssoc) || (null == cachedAssoc.getEntity2())
                            || !cachedAssoc.getEntity2().containsAll(evtFeature.getEntity2())) {
                        BasicDBObject multiopE = new BasicDBObject(MongoDbManager.each_,
                                evtFeature.getEntity2());
                        multiopAliasArrays.put(AssociationFeaturePojo.entity2_, multiopE);
                    }
                } //TESTED (by hand)

                // verb/verb cat alias:
                if (null != evtFeature.getVerb_category()) {
                    evtFeature.addVerb(evtFeature.getVerb_category());
                }
                if (null != evtFeature.getVerb()) {
                    if ((null == cachedAssoc) || (null == cachedAssoc.getVerb())
                            || !cachedAssoc.getVerb().containsAll(evtFeature.getVerb())) {
                        BasicDBObject multiopE = new BasicDBObject(MongoDbManager.each_, evtFeature.getVerb());
                        multiopAliasArrays.put(AssociationFeaturePojo.verb_, multiopE);
                    }
                } //TESTED (by hand)

                // OK - now we can copy across the fields into the cache:
                if (null != cachedAssoc) {
                    currCache.updateCachedAssocFeatureStatistics(cachedAssoc, evtFeature); //(evtFeature is now fully up to date)
                } //TESTED (by hand)

                BasicDBObject updateOp = new BasicDBObject();
                if (!multiopAliasArrays.isEmpty()) {
                    updateOp.put(MongoDbManager.addToSet_, multiopAliasArrays);
                }
                // Document count for this event
                BasicDBObject updateFreqDocCount = new BasicDBObject(AssociationFeaturePojo.doccount_,
                        nSavedDocCount);
                updateOp.put(MongoDbManager.inc_, updateFreqDocCount);

                BasicDBObject fields = new BasicDBObject(AssociationFeaturePojo.doccount_, 1);
                fields.put(AssociationFeaturePojo.entity1_, 1);
                fields.put(AssociationFeaturePojo.entity2_, 1);
                fields.put(AssociationFeaturePojo.verb_, 1);
                //(slightly annoying, since only want these if updating dc but won't know
                // until after I've got this object)

                fields.put(AssociationFeaturePojo.db_sync_time_, 1);
                fields.put(AssociationFeaturePojo.db_sync_doccount_, 1);

                DBObject dboUpdate = null;
                if (_diagnosticMode) {
                    if (null == cachedAssoc) {
                        dboUpdate = col.findOne(query, fields);
                    }
                } else {
                    if (null != cachedAssoc) {
                        col.update(query, updateOp, false, false);
                    } else { // Not cached - so have to grab the feature we're either getting or creating
                        dboUpdate = col.findAndModify(query, fields, new BasicDBObject(), false, updateOp,
                                false, true);
                        // (can use findAndModify because specify index, ie the shard key)
                        // (returns event before the changes above, update the feature object below)
                        // (also atomically creates the object if it doesn't exist so is "distributed-safe")
                    }
                }
                if ((null != cachedAssoc) || ((dboUpdate != null) && !dboUpdate.keySet().isEmpty())) // (feature already exists)
                {
                    AssociationFeaturePojo egp = cachedAssoc;

                    if (null == egp) {
                        egp = AssociationFeaturePojo.fromDb(dboUpdate, AssociationFeaturePojo.class);
                        evtFeature.setDoccount(egp.getDoccount() + nSavedDocCount);
                        evtFeature.setDb_sync_doccount(egp.getDb_sync_doccount());
                        evtFeature.setDb_sync_time(egp.getDb_sync_time());
                        if (null != egp.getEntity1()) {
                            for (String ent : egp.getEntity1())
                                evtFeature.addEntity1(ent);
                        }
                        if (null != egp.getEntity2()) {
                            for (String ent : egp.getEntity2())
                                evtFeature.addEntity2(ent);
                        }
                        if (null != egp.getVerb()) {
                            for (String verb : egp.getVerb())
                                evtFeature.addVerb(verb);
                        }
                    } //TESTED (cached and non-cached cases)
                      // (in the cached case, evtFeature has already been updated by updateCachedAssocFeatureStatistics)

                    if (_diagnosticMode) {
                        if (_logInDiagnosticMode)
                            System.out.println("AssociationAggregationUtils.updateEventFeatures, found: "
                                    + ((BasicDBObject) egp.toDb()).toString());
                        if (_logInDiagnosticMode)
                            System.out.println(
                                    "AssociationAggregationUtils.updateEventFeatures, ^^^ found from query: "
                                            + query.toString() + " / " + updateOp.toString());
                    }
                    // (In background aggregation mode we update db_sync_prio when checking the -otherwise unused, unlike entities- document update schedule) 
                } else // (the object in memory is now an accurate representation of the database, minus some fields we'll now add)
                {
                    numNewAssocs++;

                    // Synchronization settings for the newly created object
                    evtFeature.setDb_sync_doccount(nSavedDocCount);
                    if (null == savedSyncTime) {
                        savedSyncTime = Long.toString(System.currentTimeMillis());
                    }
                    evtFeature.setDb_sync_time(savedSyncTime);

                    // This is all "distributed safe" (apart from the db_sync_xxx fields, and it doesn't matter if those are
                    // out of date; the update will just be slightly out-of-date at worst) since (otherwise) these fields are
                    // only set here, and the findAndModify is atomic

                    BasicDBObject baseFields = new BasicDBObject();
                    if (null != evtFeature.getEntity1_index()) {
                        baseFields.put(AssociationFeaturePojo.entity1_index_, evtFeature.getEntity1_index());
                    }
                    if (null != evtFeature.getEntity2_index()) {
                        baseFields.put(AssociationFeaturePojo.entity2_index_, evtFeature.getEntity2_index());
                    }
                    if (null != evtFeature.getVerb_category()) {
                        baseFields.put(AssociationFeaturePojo.verb_category_, evtFeature.getVerb_category());
                    }
                    baseFields.put(AssociationFeaturePojo.assoc_type_, evtFeature.getAssociation_type());
                    baseFields.put(AssociationFeaturePojo.db_sync_doccount_, evtFeature.getDb_sync_doccount());
                    baseFields.put(AssociationFeaturePojo.db_sync_time_, evtFeature.getDb_sync_time());
                    baseFields.put(AssociationFeaturePojo.db_sync_prio_, 1000.0); // (ensures new objects are quickly index-synchronized)

                    if (!_diagnosticMode) {
                        // Store the object
                        col.update(query, new BasicDBObject(MongoDbManager.set_, baseFields));
                    } else {
                        if (_logInDiagnosticMode)
                            System.out.println("AssociationAggregationUtils.updateEventFeatures, not found: "
                                    + query.toString() + " / " + baseFields.toString() + "/ orig_update= "
                                    + updateOp.toString());
                    }

                    // (Note even in background aggregation mode we still perform the feature synchronization
                    //  for new entities - and it has to be right at the end because it "corrupts" the objects)

                } //(end if first time seen)

                if (null == cachedAssoc) { // First time we've seen this locally, so add to cache
                    currCache.addCachedAssocFeature(evtFeature);
                    if (_diagnosticMode) {
                        if (_logInDiagnosticMode)
                            System.out
                                    .println("AssociationAggregationUtils.updateEventFeatures, added to cache: "
                                            + evtFeature.toDb());
                    }
                } //TESTED (by hand)                           
            } catch (Exception e) {
                // Exception, remove from feature list
                it.remove();

                // If an exception occurs log the error
                logger.error("Exception Message: " + e.getMessage(), e);
            }

        } // (end loop over all communities for the set of features sharing and index)                        
    } // (end loop over indexes) 

    if ((numCacheHits > 0) || (numCacheMisses > 0)) { // ie some assocs were grabbed
        int cacheSize = 0;
        if (null != currCache) {
            cacheSize = currCache.getAssocCacheSize();
        }
        StringBuffer logMsg = new StringBuffer() // (should append key, but don't have that...)
                .append(" assoc_agg_time_ms=").append(new Date().getTime() - entityAggregationTime)
                .append(" total_assocs=").append(eventFeatures.size()).append(" new_assocs=")
                .append(numNewAssocs).append(" cache_misses=").append(numCacheMisses).append(" cache_hits=")
                .append(numCacheHits).append(" cache_size=").append(cacheSize);

        logger.info(logMsg.toString());
    }

}
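
The pattern in the uncached branch above is worth spelling out: findAndModify(query, fields, new BasicDBObject(), false, updateOp, false, true) applies the $inc/$addToSet update with upsert=true but returnNew=false, so it returns the pre-update document. A non-empty result therefore means the feature already existed, while an empty one means the upsert just created it; that is exactly the test performed on dboUpdate a few lines later.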

From source file: com.ikanow.infinit.e.processing.generic.aggregation.EntityAggregationUtils.java

License: Open Source License

/**
 * Updates the feature entries for the list of entities
 * that was just extracted, including changing frequency,
 * adding aliases etc
 * 
 * This method now has 3 steps:
 * 1. Try to update alias
 *    1.a If that fails, create new gaz
 * 2. Update totalfreq and doccount
 * 
 * @param entFeatures List of entities to update in the entity feature
 */
public static void updateEntityFeatures(Map<String, Map<ObjectId, EntityFeaturePojo>> entFeatures) {
    // Some diagnostic counters:
    int numCacheMisses = 0;
    int numCacheHits = 0;
    int numNewEntities = 0;
    long entityAggregationTime = new Date().getTime();

    DBCollection col = DbManager.getFeature().getEntity();

    // (This fn is normally run for a single community id)
    CommunityFeatureCaches.CommunityFeatureCache currCache = null;

    String savedSyncTime = null;
    for (Map<ObjectId, EntityFeaturePojo> entCommunity : entFeatures.values()) {

        Iterator<Map.Entry<ObjectId, EntityFeaturePojo>> it = entCommunity.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<ObjectId, EntityFeaturePojo> entFeatureKV = it.next();
            try {
                EntityFeaturePojo entFeature = entFeatureKV.getValue();

                long nSavedDocCount = entFeature.getDoccount();
                long nSavedFreqCount = entFeature.getTotalfreq();
                // (these should be constant across all communities but keep it here
                //  so can assign it using entFeature, it's v cheap so no need to get once like for sync vars)

                // For each community, see if the entity feature already exists *for that community*               
                ObjectId communityID = entFeature.getCommunityId();
                if (null != communityID) {
                    if ((null == currCache) || !currCache.getCommunityId().equals(entFeatureKV.getKey())) {
                        currCache = CommunityFeatureCaches.getCommunityFeatureCache(entFeatureKV.getKey());
                        if (_diagnosticMode) {
                            if (_logInDiagnosticMode)
                                System.out.println(
                                        "EntityAggregationUtils.updateEntityFeatures, Opened cache for community: "
                                                + entFeatureKV.getKey());
                        }
                    } //TESTED (by hand)

                    // Is this in our cache? If so can short cut a bunch of the DB interaction:
                    EntityFeaturePojo cachedEnt = currCache.getCachedEntityFeature(entFeature);
                    if (null != cachedEnt) {
                        if (_incrementalMode) {
                            if (_diagnosticMode) {
                                if (_logInDiagnosticMode)
                                    System.out.println(
                                            "EntityAggregationUtils.updateEntityFeatures, skip cached: "
                                                    + cachedEnt.toDb());
                                //TODO (INF-2825): should be continue-ing here (after implementing incremental caching fully) so can use delta more efficiently...
                            }
                        } else if (_diagnosticMode) {
                            if (_logInDiagnosticMode)
                                System.out
                                        .println("EntityAggregationUtils.updateEntityFeatures, grabbed cached: "
                                                + cachedEnt.toDb());
                        }
                        numCacheHits++;

                    } //TESTED (by hand)                  
                    else {
                        numCacheMisses++;
                    }

                    BasicDBObject query = new BasicDBObject(EntityFeaturePojo.index_, entFeature.getIndex());
                    query.put(EntityFeaturePojo.communityId_, communityID);
                    BasicDBObject updateOp = new BasicDBObject();
                    // Add aliases:
                    BasicDBObject updateOpA = new BasicDBObject();
                    if (null != entFeature.getAlias()) {
                        if ((null == cachedEnt) || (null == cachedEnt.getAlias())
                                || !cachedEnt.getAlias().containsAll(entFeature.getAlias())) {
                            //(if the data we have is already cached, don't bother adding it again)
                            BasicDBObject multiopE = new BasicDBObject(MongoDbManager.each_,
                                    entFeature.getAlias());
                            updateOpA.put(EntityFeaturePojo.alias_, multiopE);
                        } //TESTED (by hand)
                    }
                    // Add link data, if there is any:
                    if ((null != entFeature.getSemanticLinks()) && !entFeature.getSemanticLinks().isEmpty()) {
                        if ((null == cachedEnt) || (null == cachedEnt.getSemanticLinks())
                                || !cachedEnt.getSemanticLinks().containsAll(entFeature.getSemanticLinks())) {
                            //(if the data we have is already cached, don't bother adding it again)
                            BasicDBObject multiopF = new BasicDBObject(MongoDbManager.each_,
                                    entFeature.getSemanticLinks());
                            updateOpA.put(EntityFeaturePojo.linkdata_, multiopF);
                        } //TESTED (by hand)
                    }
                    // OK - now we can copy across the fields into the cache:
                    if (null != cachedEnt) {
                        currCache.updateCachedEntityFeatureStatistics(cachedEnt, entFeature); //(entFeature is now fully up to date)
                    } //TESTED (by hand)

                    if (!updateOpA.isEmpty()) {
                        updateOp.put(MongoDbManager.addToSet_, updateOpA);
                    }
                    // Update frequency:
                    BasicDBObject updateOpB = new BasicDBObject();
                    updateOpB.put(EntityFeaturePojo.totalfreq_, nSavedFreqCount);
                    updateOpB.put(EntityFeaturePojo.doccount_, nSavedDocCount);
                    updateOp.put(MongoDbManager.inc_, updateOpB);

                    //try to use find/modify to see if something comes back and set doc freq/totalfreq
                    BasicDBObject fields = new BasicDBObject(EntityFeaturePojo.totalfreq_, 1);
                    fields.put(EntityFeaturePojo.doccount_, 1);
                    fields.put(EntityFeaturePojo.alias_, 1);
                    fields.put(EntityFeaturePojo.linkdata_, 1);
                    //(slightly annoying, since only want these 2 largish fields if updating freq but won't know
                    // until after I've got this object)
                    fields.put(EntityFeaturePojo.db_sync_time_, 1);
                    fields.put(EntityFeaturePojo.db_sync_doccount_, 1);

                    DBObject dboUpdate = null;
                    if (_diagnosticMode) {
                        if (null == cachedEnt) {
                            dboUpdate = col.findOne(query, fields);
                        }
                    } else {
                        if (null != cachedEnt) {
                            col.update(query, updateOp, false, false);
                        } else { // Not cached - so have to grab the feature we're either getting or creating
                            dboUpdate = col.findAndModify(query, fields, new BasicDBObject(), false, updateOp,
                                    false, true);
                            // (can use findAndModify because specify index, ie the shard key)
                            // (returns entity before the changes above, update the feature object below)
                            // (also atomically creates the object if it doesn't exist so is "distributed-safe")
                        }
                    }
                    if ((null != cachedEnt) || ((dboUpdate != null) && !dboUpdate.keySet().isEmpty())) // (feature already exists)
                    {
                        EntityFeaturePojo gp = cachedEnt;

                        // (Update the entity feature to be correct so that it can be accurately synchronized with the index)
                        if (null == gp) {
                            gp = EntityFeaturePojo.fromDb(dboUpdate, EntityFeaturePojo.class);
                            entFeature.setTotalfreq(gp.getTotalfreq() + nSavedFreqCount);
                            entFeature.setDoccount(gp.getDoccount() + nSavedDocCount);
                            entFeature.setDbSyncDoccount(gp.getDbSyncDoccount());
                            entFeature.setDbSyncTime(gp.getDbSyncTime());
                            if (null != gp.getAlias()) {
                                entFeature.addAllAlias(gp.getAlias());
                            }
                            if (null != gp.getSemanticLinks()) {
                                entFeature.addToSemanticLinks(gp.getSemanticLinks());
                            }
                        } //TESTED (cached case and non-cached case)
                          // (in the cached case, entFeature has already been updated by updateCachedEntityFeatureStatistics)

                        if (_diagnosticMode) {
                            if (_logInDiagnosticMode)
                                System.out.println("EntityAggregationUtils.updateEntityFeatures, found: "
                                        + ((BasicDBObject) gp.toDb()).toString());
                            if (_logInDiagnosticMode)
                                System.out.println(
                                        "EntityAggregationUtils.updateEntityFeatures, ^^^ found from query: "
                                                + query.toString() + " / " + updateOp.toString());
                        }
                        // (In background aggregation mode we update db_sync_prio when checking the doc update schedule) 
                    } else // (the object in memory is now an accurate representation of the database, minus some fields we'll now add)
                    {
                        numNewEntities++;

                        // Synchronization settings for the newly created object
                        if (null == savedSyncTime) {
                            savedSyncTime = Long.toString(System.currentTimeMillis());
                        }
                        entFeature.setDbSyncDoccount(nSavedDocCount);
                        entFeature.setDbSyncTime(savedSyncTime);

                        // This is all "distributed safe" (apart from the db_sync_xxx fields, and it doesn't matter if those are
                        // out of date; the update will just be slightly out-of-date at worst) since (otherwise) these fields are
                        // only set here, and the findAndModify is atomic

                        // (Do in raw MongoDB for performance)
                        BasicDBObject baseFields = new BasicDBObject();
                        baseFields.put(EntityFeaturePojo.dimension_, entFeature.getDimension().toString());
                        baseFields.put(EntityFeaturePojo.type_, entFeature.getType());
                        baseFields.put(EntityFeaturePojo.disambiguated_name_,
                                entFeature.getDisambiguatedName());
                        baseFields.put(EntityFeaturePojo.db_sync_doccount_, entFeature.getDbSyncDoccount());
                        baseFields.put(EntityFeaturePojo.db_sync_prio_, 1000.0);
                        baseFields.put(EntityFeaturePojo.db_sync_time_, entFeature.getDbSyncTime());
                        if ((null != entFeature.getSemanticLinks())
                                && !entFeature.getSemanticLinks().isEmpty()) {
                            baseFields.put(EntityFeaturePojo.linkdata_, entFeature.getSemanticLinks());
                        }

                        //attempt to add geotag (makes necessary checks on util side)
                        //also add ontology type if geotag is found
                        EntityGeotagAggregationUtils.addEntityGeo(entFeature);
                        if (entFeature.getGeotag() != null) {
                            BasicDBObject geo = new BasicDBObject(GeoPojo.lat_, entFeature.getGeotag().lat);
                            geo.put(GeoPojo.lon_, entFeature.getGeotag().lon);
                            baseFields.put(EntityFeaturePojo.geotag_, geo);

                            if (entFeature.getOntology_type() != null) {
                                baseFields.put(EntityFeaturePojo.ontology_type_, entFeature.getOntology_type());
                            }
                        }

                        if (!_diagnosticMode) {
                            // Store the object
                            col.update(query, new BasicDBObject(MongoDbManager.set_, baseFields));
                        } else {
                            if (_logInDiagnosticMode)
                                System.out.println("EntityAggregationUtils.updateEntityFeatures, not found: "
                                        + query.toString() + ": " + baseFields.toString());
                        }

                    } //(end first time this feature seen - globally)

                    if (null == cachedEnt) { // First time we've seen this locally, so add to cache
                        currCache.addCachedEntityFeature(entFeature);
                        if (_diagnosticMode) {
                            if (_logInDiagnosticMode)
                                System.out
                                        .println("EntityAggregationUtils.updateEntityFeatures, added to cache: "
                                                + entFeature.toDb());
                        }
                    } //TESTED (by hand)                     

                } //(end if community id assigned)
            } catch (Exception e) {
                // Exception, remove from feature list
                it.remove();

                // If an exception occurs log the error
                logger.error("Exception Message: " + e.getMessage(), e);
            }

        } // (end loop over communities)
    } // (end loop over indexes)

    if ((numCacheHits > 0) || (numCacheMisses > 0)) { // ie some ents were grabbed
        int cacheSize = 0;
        if (null != currCache) {
            cacheSize = currCache.getEntityCacheSize();
        }
        StringBuffer logMsg = new StringBuffer() // (should append key, but don't have that...)
                .append(" ent_agg_time_ms=").append(new Date().getTime() - entityAggregationTime)
                .append(" total_ents=").append(entFeatures.size()).append(" new_ents=").append(numNewEntities)
                .append(" cache_misses=").append(numCacheMisses).append(" cache_hits=").append(numCacheHits)
                .append(" cache_size=").append(cacheSize);

        logger.info(logMsg.toString());
    }

}

From source file: com.images3.data.impl.ImageMetricsServiceImplMongoDB.java

License: Apache License

private void updateSecondMetrics(ImageMetricsOS metrics) {
    DBCollection coll = getDatabase().getCollection("ImageMetrics");
    BasicDBObject criteria = new BasicDBObject().append("imagePlantId", metrics.getImagePlantId())
            .append("templateName", metrics.getTemplateName()).append("second", metrics.getSecond());
    BasicDBObject returnFields = new BasicDBObject();
    BasicDBObject sort = new BasicDBObject();
    boolean remove = false;
    BasicDBObject increase = getImageIncrements(metrics);
    BasicDBObject update = new BasicDBObject().append("$inc", increase);
    boolean returnNew = true;
    boolean upsert = true;
    coll.findAndModify(criteria, returnFields, sort, remove, update, returnNew, upsert);
}

From source file: com.images3.data.impl.ImageMetricsServiceImplMongoDB.java

License: Apache License

private void updateTemplateMetrics(ImageMetricsOS metrics) {
    DBCollection coll = getDatabase().getCollection("ImageMetrics");
    BasicDBObject criteria = new BasicDBObject().append("imagePlantId", metrics.getImagePlantId())
            .append("templateName", metrics.getTemplateName()).append("second", 0);
    BasicDBObject returnFields = new BasicDBObject();
    BasicDBObject sort = new BasicDBObject();
    boolean remove = false;
    BasicDBObject increase = getImageIncrements(metrics);
    BasicDBObject update = new BasicDBObject().append("$inc", increase);
    boolean returnNew = true;
    boolean upsert = true;
    coll.findAndModify(criteria, returnFields, sort, remove, update, returnNew, upsert);
}
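
updateSecondMetrics and updateTemplateMetrics differ only in the "second" criterion (a per-second bucket versus the template-level bucket pinned at second 0), so the shared tail is a candidate for a common helper. In both cases returnNew=true and upsert=true make findAndModify atomically create the metrics bucket on first use and apply the $inc counters thereafter.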

From source file: com.openbravo.data.loader.MongoDBPreparedSentence.java

@Override
public DataResultSet openExec(Object params) throws BasicException {
    closeExec();

    DBCollection collection = m_s.getMongoDBDatabase().getCollection(m_collectionName);

    if (m_SerWrite != null) {
        if (m_insert)
            m_SerWrite.writeValues(new MongoDBPreparedSentencePars(m_insertDBObject, m_writeParamColumnMap),
                    params);
        else if (m_find)
            m_SerWrite.writeValues(new MongoDBPreparedSentencePars(m_findDBObject, m_writeParamColumnMap),
                    params);
        else if (m_update)
            m_SerWrite.writeValues(new MongoDBPreparedSentencePars(m_updateDBObject, m_writeParamColumnMap),
                    params);
    }

    if (!m_nullColumn.isEmpty())
        m_findDBObject.append(m_nullColumn, new BasicDBObject("$exists", true));

    if (m_lessThanColumn != null) {
        for (int i = 0; i < m_lessThanColumn.length; ++i) {
            Object lessThanValue = m_findDBObject.get(m_lessThanColumn[i]);
            m_findDBObject.removeField(m_lessThanColumn[i]);
            m_findDBObject.append(m_lessThanColumn[i], new BasicDBObject("$lt", lessThanValue));
        }
    }

    if (m_greaterThanColumn != null) {
        for (int i = 0; i < m_greaterThanColumn.length; ++i) {
            Object greaterThanValue = m_findDBObject.get(m_greaterThanColumn[i]);
            m_findDBObject.removeField(m_greaterThanColumn[i]);
            m_findDBObject.append(m_greaterThanColumn[i], new BasicDBObject("$gt", greaterThanValue));
        }
    }

    // Insert statement
    if (m_insert) {
        collection.insert(m_insertDBObject);
    } else if (m_find) {

        if (!m_sortColumn.isEmpty()) {
            m_dbCursor = collection.find(m_findDBObject).sort(new BasicDBObject(m_sortColumn, 1));
            return new MongoDBDataResultSet(m_dbCursor, m_readParamColumnMap, m_SerRead);
        }

        if (!m_maxColumn.isEmpty()) {
            Iterator<DBObject> it = collection
                    .aggregate(new BasicDBObject("$match", m_findDBObject),
                            new BasicDBObject("$group",
                                    new BasicDBObject("_id", "null").append("MAX",
                                            new BasicDBObject("$max", "$" + m_maxColumn))))
                    .results().iterator();
            //Iterator<DBObject> it = collection.aggregate(new BasicDBObject("$group", new BasicDBObject("_id", "null").append("MAX", new BasicDBObject("$max", "$" + m_maxColumn)))).results().iterator();
            DBObject maxObject = new BasicDBObject();
            if (it.hasNext())
                maxObject = it.next();
            return new MongoDBDataResultSet(maxObject, m_readParamColumnMap, m_SerRead);
        }

        if (m_countAll) {
            Iterator<DBObject> it = collection
                    .aggregate(new BasicDBObject("$match", m_findDBObject), new BasicDBObject("$group",
                            new BasicDBObject("_id", "null").append("COUNT", new BasicDBObject("$sum", 1))))
                    .results().iterator();
            //Iterator<DBObject> it = collection.aggregate(new BasicDBObject("$group", new BasicDBObject("_id", "null").append("MAX", new BasicDBObject("$max", "$" + m_maxColumn)))).results().iterator();
            DBObject maxObject = new BasicDBObject();
            if (it.hasNext())
                maxObject = it.next();
            return new MongoDBDataResultSet(maxObject, m_readParamColumnMap, m_SerRead);
        }
        m_dbCursor = collection.find(m_findDBObject);
        return new MongoDBDataResultSet(m_dbCursor, m_readParamColumnMap, m_SerRead);
    } else if (m_update) {
        String findKey = ((String) m_writeParamColumnMap
                .get((Integer) m_writeParamColumnMap.keySet().toArray()[m_writeParamColumnMap.size() - 1]));
        String key = findKey.replace('s', ' ').trim();
        m_findDBObject.append(key, m_updateDBObject.get(findKey));

        // Remove the find criteria from the update object
        m_updateDBObject.remove(m_writeParamColumnMap
                .get((Integer) m_writeParamColumnMap.keySet().toArray()[m_writeParamColumnMap.size() - 1]));
        // remove must be false here: the server rejects findAndModify commands
        // that specify both remove=true and an update document
        collection.findAndModify(m_findDBObject, null, null, false, m_updateDBObject, true, true);
        return new SentenceUpdateResultSet(0);
    }
    return null;
}

From source file: com.socialsky.mods.MongoPersistor.java

License: Apache License

private void doFindAndModify(Message<JsonObject> message) {
    String collectionName = getMandatoryString("collection", message);
    if (collectionName == null) {
        return;
    }
    JsonObject msgBody = message.body();
    DBObject update = jsonToDBObjectNullSafe(msgBody.getObject("update"));
    DBObject query = jsonToDBObjectNullSafe(msgBody.getObject("matcher"));
    DBObject sort = jsonToDBObjectNullSafe(msgBody.getObject("sort"));
    DBObject fields = jsonToDBObjectNullSafe(msgBody.getObject("fields"));
    boolean remove = msgBody.getBoolean("remove", false);
    boolean returnNew = msgBody.getBoolean("new", false);
    boolean upsert = msgBody.getBoolean("upsert", false);

    DBCollection collection = db.getCollection(collectionName);
    DBObject result = collection.findAndModify(query, fields, sort, remove, update, returnNew, upsert);

    JsonObject reply = new JsonObject();
    if (result != null) {
        JsonObject resultJson = new JsonObject(result.toMap());
        reply.putObject("result", resultJson);
    }
    sendOK(message, reply);
}
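
For reference, a sketch of a Vert.x 2 event-bus message that would drive this handler from inside a verticle. The field names match the getters above, while the handler address and the "action" dispatch key are assumptions about the surrounding module:

JsonObject msg = new JsonObject()
        .putString("action", "find_and_modify") // assumed dispatch key
        .putString("collection", "users")       // mandatory
        .putObject("matcher", new JsonObject().putString("name", "bob"))
        .putObject("update", new JsonObject().putObject("$set",
                new JsonObject().putBoolean("active", true)))
        .putBoolean("new", true)    // return the post-update document
        .putBoolean("upsert", false);
vertx.eventBus().send("mongo.persistor", msg); // assumed handler address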