Example usage for com.mongodb DBObject containsField

List of usage examples for com.mongodb DBObject containsField

Introduction

This page collects usage examples for com.mongodb DBObject containsField.

Prototype

boolean containsField(String s);

Document

Checks if this object contains a field with the given name.
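
A minimal sketch of the call, assuming the legacy com.mongodb driver (where BasicDBObject implements DBObject):

import com.mongodb.BasicDBObject;
import com.mongodb.DBObject;

DBObject doc = new BasicDBObject("name", "alice").append("age", 30);

if (doc.containsField("age")) {
    Integer age = (Integer) doc.get("age"); // field is present, so the get/cast is safe
}
boolean hasEmail = doc.containsField("email"); // false: the field was never set

Note that containsField tests for the presence of the key, not for a non-null value: a field explicitly stored as null still returns true.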

Usage

From source file:com.ikanow.infinit.e.core.mapreduce.HadoopJobRunner.java

License:Open Source License

/**
 * Moves the output of a job from output_tmp to output and deletes
 * the tmp collection.
 * 
 * @param cmr
 * @throws IOException 
 * @throws ParserConfigurationException 
 * @throws SAXException 
 */
private void moveTempOutput(CustomMapReduceJobPojo cmr)
        throws IOException, SAXException, ParserConfigurationException {
    // If we are an export job then move files:
    bringTempOutputToFront(cmr);
    // (for file-export jobs the rest of this method is then a no-op)

    /**
     * Atomic plan:
     * If not append, move customlookup pointer to tmp collection, drop old collection.
     * If append, set sync flag (find/mod), move results from tmp to old, unset sync flag.
     * 
     */
    //step 1: build out any of the post-proc arguments
    DBObject postProcObject = null;
    boolean limitAllData = true;
    boolean hasSort = false;
    int limit = 0;
    BasicDBObject sort = new BasicDBObject();
    try {
        postProcObject = (DBObject) com.mongodb.util.JSON
                .parse(getQueryOrProcessing(cmr.query, QuerySpec.POSTPROC));
        if (postProcObject != null) {
            if (postProcObject.containsField("limitAllData")) {
                limitAllData = (Boolean) postProcObject.get("limitAllData");
            }
            if (postProcObject.containsField("limit")) {
                limit = (Integer) postProcObject.get("limit");
                if (postProcObject.containsField("sortField")) {
                    String sfield = (String) postProcObject.get("sortField");
                    int sortDir = 1;
                    if (postProcObject.containsField("sortDirection")) {
                        sortDir = (Integer) postProcObject.get("sortDirection");
                    }
                    sort.put(sfield, sortDir);
                    hasSort = true;
                } else if (limit > 0) {
                    //set a default sort because the user posted a limit
                    sort.put("_id", -1);
                    hasSort = true;
                }
            }
        }
    } catch (Exception ex) {
        _logger.info(
                "job_error_post_proc_title=" + cmr.jobtitle + " job_error_post_proc_id=" + cmr._id.toString()
                        + " job_error_post_proc_message=" + HarvestExceptionUtils.createExceptionMessage(ex));
    }

    //step 2a if not appending results then work on temp collection and swap to main
    if ((null == cmr.appendResults) || !cmr.appendResults) //format temp then change lookup pointer to temp collection
    {
        //transform all the results into necessary format:
        // (docs lacking "key" are re-inserted with key = old _id and a fresh _id, then the originals are removed below)
        DBCursor dbc_tmp = DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp)
                .find(new BasicDBObject("key", null)).sort(sort).limit(limit);
        while (dbc_tmp.hasNext()) {
            DBObject dbo = dbc_tmp.next();
            Object key = dbo.get("_id");
            dbo.put("key", key);
            dbo.removeField("_id");
            DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp).insert(dbo);
        }
        DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp)
                .remove(new BasicDBObject("key", null));

        //swap the output collections
        BasicDBObject notappendupdates = new BasicDBObject(CustomMapReduceJobPojo.outputCollection_,
                cmr.outputCollectionTemp);
        notappendupdates.append(CustomMapReduceJobPojo.outputCollectionTemp_, cmr.outputCollection);
        DbManager.getCustom().getLookup().findAndModify(new BasicDBObject(CustomMapReduceJobPojo._id_, cmr._id),
                new BasicDBObject(MongoDbManager.set_, notappendupdates));
        String temp = cmr.outputCollectionTemp;
        cmr.outputCollectionTemp = cmr.outputCollection;
        cmr.outputCollection = temp;
    } else //step 2b if appending results then drop modified results in output collection
    {
        DbManager.getCustom().getLookup().findAndModify(new BasicDBObject(CustomMapReduceJobPojo._id_, cmr._id),
                new BasicDBObject(MongoDbManager.set_, new BasicDBObject("isUpdatingOutput", true)));
        //remove any aged out results
        if ((null != cmr.appendAgeOutInDays) && cmr.appendAgeOutInDays > 0) {
            //remove any results that have aged out
            long ageOutMS = (long) (cmr.appendAgeOutInDays * MS_IN_DAY);
            Date lastAgeOut = new Date(((new Date()).getTime() - ageOutMS));
            DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollection).remove(
                    new BasicDBObject("_id", new BasicDBObject(MongoDbManager.lt_, new ObjectId(lastAgeOut))));
        }
        DBCursor dbc_tmp;
        if (!limitAllData) {
            //sort and limit the temp data set because we only want to process it
            dbc_tmp = DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp)
                    .find(new BasicDBObject("key", null)).sort(sort).limit(limit);
            limit = 0; //reset limit so we get everything in a few steps (we only want to limit the new data)
        } else {
            dbc_tmp = DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp)
                    .find(new BasicDBObject("key", null));
        }

        DBCollection dbc = DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollection);
        //transform temp results and dump into output collection
        while (dbc_tmp.hasNext()) {
            DBObject dbo = dbc_tmp.next();
            //transform the dbo to format {_id:ObjectId, key:(prev_id), value:value}
            Object key = dbo.get("_id");
            dbo.put("key", key);
            dbo.removeField("_id");
            //_id is auto-assigned a fresh ObjectId on insert
            dbc.insert(dbo);
        }
        //if there is a sort, we need to apply it to all the data now
        if (hasSort) {
            ObjectId OID = new ObjectId();
            BasicDBObject query = new BasicDBObject("_id", new BasicDBObject(MongoDbManager.lt_, OID));
            //find everything inserted before now and sort/limit the data
            DBCursor dbc_sort = dbc.find(query).sort(sort).limit(limit);
            while (dbc_sort.hasNext()) {
                //reinsert the data into db (it should be in sorted order naturally now)
                DBObject dbo = dbc_sort.next();
                dbo.removeField("_id");
                dbc.insert(dbo);
            }
            //remove everything inserted before we reorganized everything (should leave only the new results in natural order)
            dbc.remove(query);
        }
        DbManager.getCustom().getLookup().findAndModify(new BasicDBObject(CustomMapReduceJobPojo._id_, cmr._id),
                new BasicDBObject(MongoDbManager.set_, new BasicDBObject("isUpdatingOutput", false)));
    }
    //step 3: clean up the temp output collection so we can use it again
    // (drop it, removing chunks)
    try {
        DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp).drop();
    } catch (Exception e) {
    } // That's fine, it probably just doesn't exist yet...
}
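
For reference, the post-processing spec parsed at the top of this method is a plain JSON object; judging from the containsField checks above, a hypothetical example would be:

{ "limitAllData" : false, "limit" : 100, "sortField" : "value", "sortDirection" : -1 }

If sortField is absent but limit is positive, the code falls back to a default sort on _id descending before applying the limit.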

From source file:com.ikanow.infinit.e.data_model.custom.InfiniteMongoSplitter.java

License:Apache License

/**
 * Checks if the new params MAX_SPLITS and MAX_DOCS_PER_SPLIT are set
 * in the config.  If they are it will use those to do splits via limit/skip
 * otherwise it will call the previous chunking splitter in MongoSplitter.
 * @param conf
 * @return
 */

public static List<InputSplit> calculateSplits(InfiniteMongoConfig conf) {
    // First off: What is our sharding scheme?

    boolean shardingPolicyNew = false;
    try {
        BasicDBObject shardQuery = new BasicDBObject("_id", "doc_metadata.metadata");
        BasicDBObject shardInfo = (BasicDBObject) DbManager.getCollection("config", "collections")
                .findOne(shardQuery);
        if (null != shardInfo) {
            BasicDBObject shardInfoKey = (BasicDBObject) shardInfo.get("key");
            if (null != shardInfoKey) {
                shardingPolicyNew = (shardInfoKey.size() > 1);
            }
        }
    } //TESTED (new and old)
    catch (Exception e) {
    } // stick with the old sharding, it's probably going to die soon after though, honestly

    // conf.getQuery returns a new copy of the query, so get once and use everywhere...
    BasicDBObject confQuery = (BasicDBObject) conf.getQuery();

    BasicDBObject srcTagsQuery = (BasicDBObject) conf.getSourceTags();

    String collection = conf.getInputURI().getCollection();
    if (!collection.equals(DbManager.getDocument().getContent().getName())
            && !collection.equals(DbManager.getDocument().getMetadata().getName())) {
        // Case 1: feature table or custom table
        // Just run legacy code
        return calculateSplits_phase2(conf, confQuery, false, false, null);
    } else { // complex cases...
        boolean simpleOtherIndex = false;
        // Check whether a simple query has been performed on a different indexed field         
        if (null == srcTagsQuery) { // (if srcTags specified, then going to want to use sourceKey as the index)
            for (String s : Arrays.asList(EntityPojo.docQuery_index_, DocumentPojo.url_)) {
                Object selector = confQuery.get(s);
                if (selector instanceof String) {
                    simpleOtherIndex = true;
                    break;
                } else if (selector instanceof DBObject) {
                    DBObject selectorDbo = (DBObject) selector;
                    if (selectorDbo.containsField(DbManager.in_)) {
                        simpleOtherIndex = true;
                        break;
                    }
                }
            } //TESTED (both types, plus check complex indexes don't work)         
              // ALLOWED: {"entities.index": { "$in": [ "xxx", "yyy"] }, {"entities.index": "xxx" }, ditto for "url"
              // NOT ALLOWED: { "entities.index": { "$ne": "xxx" } }
        }
        //TESTED check ignored if eg entity_index specified

        if (simpleOtherIndex) {
            // Case 2: we have a simple query on an indexed field 
            // Just run legacy code

            return calculateSplits_phase2(conf, confQuery, false, shardingPolicyNew, null);
        } //TESTED
        else if (conf.getLimit() > 0) { // debug
            //Case 3: Ensure we have small sets of sources to search over
            BasicDBList collectionOfSplits = splitPrecalculations_oldShardSchemeOrDebug(confQuery, srcTagsQuery,
                    conf.getMaxDocsPerSplit());
            final List<InputSplit> splits = new ArrayList<InputSplit>();

            boolean queryNonTrivial = isQueryNonTrivial(confQuery);
            if (!queryNonTrivial) {
                //Case 3a: query is trivial, so can just create splits directly from the split pre-calcs
                int toProcess = conf.getLimit();
                Iterator<Object> itSplit = collectionOfSplits.iterator();
                while ((toProcess > 0) && (itSplit.hasNext())) {
                    BasicDBObject split = (BasicDBObject) itSplit.next();

                    int docCount = (int) split.getLong(SourceHarvestStatusPojo.doccount_, 0L);
                    int toGet = (docCount > toProcess) ? toProcess : docCount;
                    BasicDBObject modQuery = convertQuery(confQuery, split.get(DocumentPojo.sourceKey_));
                    if (null != modQuery) {
                        splits.add(new InfiniteMongoInputSplit(conf.getInputURI(), conf.getInputKey(), modQuery,
                                conf.getFields(), conf.getSort(), toGet, 0, conf.isNoTimeout()));
                        toProcess -= docCount;
                    }
                } //TESTED
            } else {
                // Case 3b: annoying, some extra query terms, gonna need to do it the hard way...
                int toProcess = conf.getLimit();
                Iterator<Object> itSplit = collectionOfSplits.iterator();
                DBCollection coll = InfiniteMongoConfigUtil.getCollection(conf.getInputURI());
                while ((toProcess > 0) && (itSplit.hasNext())) {
                    BasicDBObject split = (BasicDBObject) itSplit.next();

                    BasicDBObject modQuery = convertQuery(confQuery, split.get(DocumentPojo.sourceKey_));
                    if (null != modQuery) {
                        int docsCounted = (int) coll.getCount(modQuery, null, toProcess, 0);
                        int toGet = (docsCounted > toProcess) ? toProcess : docsCounted;
                        if (docsCounted > 0) {
                            splits.add(new InfiniteMongoInputSplit(conf.getInputURI(), conf.getInputKey(),
                                    modQuery, conf.getFields(), conf.getSort(), toGet, 0, conf.isNoTimeout()));
                            toProcess -= docsCounted;
                        }
                    } //TESTED
                }
            } //TESTED

            return splits;
        } else { // More complex cases:

            if (shardingPolicyNew) {
                // Case 4a: NEW SHARDING SCHEME

                // Always fetch the new sources, eg convert communityId to sourceKeys
                try {
                    splitPrecalculations_newShardScheme(confQuery, srcTagsQuery); // (modifies confQuery if returns true)            
                    boolean queryNonTrivial = isQueryNonTrivial(confQuery);

                    return calculateSplits_phase2(conf, confQuery, !queryNonTrivial, shardingPolicyNew, null);

                    // (ie trivial query => always use chunks, bypass skip/limit test)
                } //TESTED (trivial + non-trivial)
                catch (Exception e) { // Didn't match any sources, no problem
                    return new ArrayList<InputSplit>();
                } //TESTED

            } //TESTED
            else {

                BasicDBList collectionOfSplits = splitPrecalculations_oldShardSchemeOrDebug(confQuery,
                        srcTagsQuery, conf.getMaxDocsPerSplit());

                if (null == collectionOfSplits) {
                    // Case 4b: OLD SHARDING SCHEME can't get a partition by source keys, just back off to old code
                    return calculateSplits_phase2(conf, confQuery, false, shardingPolicyNew, null);
                } //TESTED (old code)
                else {
                    conf.setMaxDocsPerSplit(2 * conf.getMaxDocsPerSplit());
                    // (because we stop creating splits when they exceed the size)

                    // Case 4c: OLD SHARDING SCHEME, have a source key partition
                    int nMaxCount = 1 + conf.getMaxDocsPerSplit() * conf.getMaxSplits();
                    boolean queryNonTrivial = isQueryNonTrivial(confQuery);
                    final List<InputSplit> splits = new ArrayList<InputSplit>();

                    BasicDBObject savedQuery = confQuery;

                    Iterator<Object> itSplit = collectionOfSplits.iterator();
                    BasicDBList bigSplit = null;
                    while (itSplit.hasNext()) {
                        BasicDBObject split = (BasicDBObject) itSplit.next();
                        int docCount = (int) split.getLong(SourceHarvestStatusPojo.doccount_, 0L);
                        if (docCount < nMaxCount) { // small split, will use skip/limit
                            BasicDBObject modQuery = convertQuery(savedQuery,
                                    split.get(DocumentPojo.sourceKey_));
                            if (null != modQuery) {

                                final int SPLIT_THRESHOLD = 3;
                                // A few cases:
                                if ((docCount < (SPLIT_THRESHOLD * conf.getMaxDocsPerSplit()))
                                        || !queryNonTrivial) {
                                    splits.addAll(calculateSplits_phase2(conf, modQuery, false,
                                            shardingPolicyNew, (Integer) docCount));
                                } //TESTED (based on limit, based on query)
                                else {
                                    // My guess at the point at which you might as well do the full query in the hope you're going
                                    // to save some (empty) splits
                                    splits.addAll(calculateSplits_phase2(conf, modQuery, false,
                                            shardingPolicyNew, null));
                                } //TESTED
                            } //TESTED
                        } else { // large split, combine all these guys into an array of source keys
                            if (null == bigSplit) {
                                bigSplit = new BasicDBList();
                            }
                            bigSplit.add(split.get(DocumentPojo.sourceKey_));
                            // (guaranteed to be a single element)
                        }
                    } //(end loop over collections)

                    if (null != bigSplit) {

                        // If we have a big left over community then create a set of splits for that - always chunks if query trivial
                        if (1 == bigSplit.size()) {
                            confQuery.put(DocumentPojo.sourceKey_, bigSplit.iterator().next());
                        } else {
                            confQuery.put(DocumentPojo.sourceKey_, new BasicDBObject(DbManager.in_, bigSplit));
                        }
                        splits.addAll(calculateSplits_phase2(conf, confQuery, !queryNonTrivial,
                                shardingPolicyNew, null));
                    } //TESTED: singleton+trivial (sandy), array+trivial (sentiment/enron), array+non-trivial (sentiment/enron, docGeo), singleton+non-trivial (sandy, docGeo)

                    return splits;

                } //TESTED: end if Cases 4a, 4b, 4c

            } //(end if old vs new sharding policy)

        } //(non-debug case)
    } //(content or metadata table are most complex)
}

From source file:com.impetus.client.mongodb.MongoDBClient.java

License:Apache License

/**
 * Parses the map reduce command.
 * 
 * @param jsonClause
 *            the json clause
 * @return the map reduce command
 */
private MapReduceCommand parseMapReduceCommand(String jsonClause) {
    String collectionName = jsonClause.replaceFirst("(?ms).*?\\.\\s*(.+?)\\s*\\.\\s*mapReduce\\s*\\(.*", "$1");
    if (collectionName.contains("getCollection")) {
        collectionName = collectionName.replaceFirst(".*getCollection\\s*\\(\\s*(['\"])([^'\"]+)\\1\\s*\\).*",
                "$2");
    }

    DBCollection collection = mongoDb.getCollection(collectionName);

    String body = jsonClause.replaceFirst("^(?ms).*?mapReduce\\s*\\(\\s*(.*)\\s*\\)\\s*;?\\s*$", "$1");
    String mapFunction = findCommaSeparatedArgument(body, 0).trim();
    String reduceFunction = findCommaSeparatedArgument(body, 1).trim();

    String query = findCommaSeparatedArgument(body, 2).trim();
    DBObject parameters = (DBObject) JSON.parse(query);
    DBObject mongoQuery;
    if (parameters.containsField("query")) {
        mongoQuery = (DBObject) parameters.get("query");
    } else {
        mongoQuery = new BasicDBObject();
    }

    return new MapReduceCommand(collection, mapFunction, reduceFunction, null,
            MapReduceCommand.OutputType.INLINE, mongoQuery);
}
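
The two replaceFirst patterns expect jsonClause to look like a mongo shell invocation. A hypothetical input this parser would accept (mapFn and reduceFn standing in for the JavaScript function text, which findCommaSeparatedArgument is assumed to extract):

db.books.mapReduce(mapFn, reduceFn, { query : { year : 2000 } });

The third comma-separated argument is handed to JSON.parse; its "query" field, when present (the containsField check), becomes the filter for the inline map-reduce, and an empty query is used otherwise.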

From source file:com.liferay.mongodb.hook.listeners.ExpandoColumnListener.java

License:Open Source License

protected void doOnAfterRemove(ExpandoColumn expandoColumn) throws Exception {

    ExpandoTable expandoTable = ExpandoTableLocalServiceUtil.getTable(expandoColumn.getTableId());

    DBCollection dbCollection = MongoDBUtil.getCollection(expandoTable);

    for (DBObject indexDBObject : dbCollection.getIndexInfo()) {
        DBObject keyDBObject = (DBObject) indexDBObject.get("key");

        if (keyDBObject.containsField(expandoColumn.getName())) {
            dbCollection.dropIndex(keyDBObject);
        }
    }

    DBObject operatorDBObject = new BasicDBObject(MongoOperator.UNSET,
            new BasicDBObject(expandoColumn.getName(), 1));

    dbCollection.update(new BasicDBObject(), operatorDBObject, false, true);
}
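
A note on the shape being tested: each document returned by getIndexInfo() carries a "key" sub-document mapping indexed field names to sort directions (e.g. { "myExpandoField" : 1 }), so containsField(expandoColumn.getName()) is true exactly when the removed column participates in that index.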

From source file:com.lowereast.guiceymongo.GuiceyCollection.java

License:Apache License

public Item findAndModify(DBObject query, DBObject update, DBObject sort, boolean returnNewValue,
        FieldSet fields, boolean upsert) {
    DBObject command = new BasicDBObject("findandmodify", _collection.getName()).append("update", update);
    if (query != null && !EMPTY.equals(query))
        command.put("query", query);
    if (sort != null && !EMPTY.equals(sort))
        command.put("sort", sort);
    if (returnNewValue)
        command.put("new", true);
    if (fields != null)
        command.put("fields", fields.build());
    if (upsert)
        command.put("upsert", upsert);

    DBObject result = _collection.getDB().command(command);
    if (result.containsField("ok"))
        return _wrapper.wrap((DBObject) result.get("value"));
    throw new MongoException((Integer) result.get("code"), (String) result.get("errmsg"));
}
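
A note on the design: this relies on the legacy driver's raw command result, where a successful findandmodify reply carries an "ok" field alongside "value". Since servers generally return ok with a value of 0 on failure as well, checking the value (as CommandResult.ok() does) would be more defensive than checking mere presence, but the error path here still surfaces the server's "code" and "errmsg".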

From source file:com.machinelinking.storage.mongodb.MongoDocument.java

License:Apache License

public static MongoDocument unwrap(DBObject in) {
    if (in == null)
        return null;
    if (in.containsField(ID_FIELD) && in.containsField(VERSION_FIELD) && in.containsField(NAME_FIELD)
            && in.containsField(CONTENT_FIELD))
        return new MongoDocument(in);
    else
        throw new IllegalArgumentException();
}

From source file:com.mobileman.kuravis.core.domain.util.EntityUtils.java

License:Apache License

/**
 * @param entity
 * @return entity
 */
public static DBObject setBasePropertiesOnCreate(DBObject entity) {
    entity.put(CREATED_ON, new Date());
    entity.put(MODIFIED_ON, entity.get(CREATED_ON));
    if (!entity.containsField(ID)) {
        entity.put(ID, newId());
    }

    return entity;
}
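
A hypothetical call, using the CREATED_ON/MODIFIED_ON/ID constants referenced above:

DBObject entity = new BasicDBObject("name", "aspirin");
EntityUtils.setBasePropertiesOnCreate(entity);
// entity now carries matching creation/modification timestamps; an id was
// generated only because none was set, thanks to the containsField guard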

From source file:com.mobileman.kuravis.core.domain.util.EntityUtils.java

License:Apache License

public static void copyAttribute(String propertyName, DBObject source, DBObject target) {
    if (source.containsField(propertyName)) {
        target.put(propertyName, source.get(propertyName));
    }
}
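
The containsField guard makes this a copy-if-present helper; a quick sketch with hypothetical values:

DBObject source = new BasicDBObject("email", "jane@example.com");
DBObject target = new BasicDBObject();
EntityUtils.copyAttribute("email", source, target); // copied: source has the field
EntityUtils.copyAttribute("phone", source, target); // no-op: containsField is false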

From source file:com.mobileman.kuravis.core.domain.util.UserUtils.java

License:Apache License

/**
 * @param source
 * @param target
 */
public static void copyUser(DBObject source, DBObject target) {
    for (String property : new String[] { "email", "name", "gender", "yearOfBirth", "state", "settings",
            EntityUtils.ID }) {
        if (source.containsField(property)) {
            if (Map.class.isInstance(source.get(property))) {
                // copy nested maps into a new BasicDBObject so source and target don't share mutable state
                target.put(property, new BasicDBObject(Map.class.cast(source.get(property))));
            } else {
                target.put(property, source.get(property));
            }

        }
    }
}

From source file:com.mobileman.kuravis.core.domain.util.UserUtils.java

License:Apache License

/**
 * @param user
 * @return the user's gender if set, Gender.UNKNOWN if the field is missing or null, or null if user itself is null
 */
public static String getGender(DBObject user) {
    if (user != null) {
        if (!user.containsField(User.ATTR_GENDER) || user.get(User.ATTR_GENDER) == null) {
            return Gender.UNKNOWN.getValue();
        }

        return (String) user.get(User.ATTR_GENDER);
    }

    return null;
}
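
A quick sketch of the fallback behaviour (User.ATTR_GENDER and Gender.UNKNOWN as defined elsewhere in the project):

DBObject user = new BasicDBObject(); // no gender field at all
String g = UserUtils.getGender(user); // returns Gender.UNKNOWN.getValue()
String none = UserUtils.getGender(null); // returns null: no user document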