Example usage for com.mongodb.client.model Filters eq

Introduction

On this page you can find example usage for com.mongodb.client.model.Filters.eq.

Prototype

public static <TItem> Bson eq(final String fieldName, @Nullable final TItem value) 

Document

Creates a filter that matches all documents where the value of the field name equals the specified value.
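
Before the project examples below, here is a minimal, self-contained sketch of the call. The connection string, database name ("test"), collection name ("people"), and the field/value pair are illustrative assumptions, not taken from the examples:

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.Filters;
import org.bson.Document;

public class FiltersEqExample {
    public static void main(String[] args) {
        // Assumes a mongod reachable at localhost:27017 (hypothetical setup)
        try (MongoClient client = MongoClients.create("mongodb://localhost:27017")) {
            MongoCollection<Document> people = client.getDatabase("test").getCollection("people");
            // Filters.eq("name", "Alice") renders to the BSON query { "name": "Alice" }
            Document first = people.find(Filters.eq("name", "Alice")).first();
            System.out.println(first);
        }
    }
}

Note that the single-argument overload Filters.eq(value) is shorthand for Filters.eq("_id", value).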

Usage

From source file:com.project.shlok.TopicsAnalyzer.TweetAnalytics.java

/**
   * getNegativeSentimentKeywords
   * Gets negative keywords from the database
   * @param query - the query to be tracked
   * @return ArrayList<HashMap<String,Integer>> - an arraylist of maps containing keywords and their respective weights
   */
public ArrayList<HashMap<String, Integer>> getNegativeSentimentKeywords(String query) {
    ArrayList<HashMap<String, Integer>> keywordsList = new ArrayList<HashMap<String, Integer>>();
    FindIterable<Document> keywordsIterable = sentimentsCol.find(Filters.eq("query", query));
    Iterator<Document> keywordsIterator = keywordsIterable.iterator();

    if (keywordsIterator.hasNext()) {
        Document keywordsDoc = keywordsIterator.next();
        Document negKeyWordsDoc = (Document) keywordsDoc.get("negwords");

        for (String key : negKeyWordsDoc.keySet()) {
            Integer count = Integer.valueOf(negKeyWordsDoc.get(key).toString());
            Sentiment wordsentiment = lexicon.analyze(key);
            // Performs tf-idf on the keyword and assigns weights
            Integer weight = 0;
            double key_tf = 0.0;
            double key_idf = 0.0;
            double key_tfidf = 0.0;
            int normalized_tfidf = 0;
            if (count != 0 && negKeyWordsDoc.size() != 0) {
                key_tf = (double) count / negKeyWordsDoc.size();
                // Cast to double so the IDF ratio is not truncated by integer division
                key_idf = Math.log((double) negKeyWordsDoc.size() / count);
                key_tfidf = key_tf * key_idf;
                normalized_tfidf = (int) (key_tfidf * 100);
            }

            if (!key.equals("https") && !key.equals("http") && !key.contains("http")
                    && !key.contains("https")) {
                if (normalized_tfidf > 40) {
                    weight = normalized_tfidf;
                }
                // if tf-idf score is not beyond threshold, but word is informative and negative
                else if (wordsentiment.getScore() < 0) {
                    weight = 40 + Math.abs(wordsentiment.getScore());
                } else if (wordsentiment.getScore() <= 0 && negativewordslist.indexOf(key) != -1) {
                    Random random = new Random();
                    weight = random.nextInt(40) + 40;
                }
                if (weight != 0) {
                    HashMap<String, Integer> negMap = new HashMap<String, Integer>();
                    negMap.put(key, weight);
                    keywordsList.add(negMap);
                }
            }
        }

    }

    return keywordsList;
}

From source file:com.project.shlok.TopicsAnalyzer.TweetAnalytics.java

/**
   * getNamedEntities
   * Gets named entities from the database
   * @param query - the query to be tracked
   * @return ArrayList<HashMap<String,Integer>> - an arraylist of maps containing entities and their respective counts
   */
public ArrayList<HashMap<String, Integer>> getNamedEntities(String query) {
    ArrayList<HashMap<String, Integer>> entities = new ArrayList<HashMap<String, Integer>>();
    FindIterable<Document> entityIterable = entitiesCol.find(Filters.eq("query", query));
    Iterator<Document> entityIterator = entityIterable.iterator();
    if (entityIterator.hasNext()) {
        Document entityDoc = entityIterator.next();
        Document entitiesDoc = (Document) entityDoc.get("entities");
        for (String entity : entitiesDoc.keySet()) {
            Integer count = Integer.valueOf(entitiesDoc.get(entity).toString());
            if (count > 10) {
                String cleanedEntity = entity.replace("+", " ");
                if (!cleanedEntity.contains(query) && !query.contains(cleanedEntity)
                        && !cleanedEntity.equals("RT")) {
                    HashMap<String, Integer> map = new HashMap<String, Integer>();
                    map.put(cleanedEntity, count);
                    entities.add(map);
                }
            }
        }
    }

    return entities;
}

From source file:com.project.shlok.TopicsAnalyzer.TwitterReader.java

/**
   * insertSentimentsAndKeywords
   * Inserts and updates the sentiment of a tweet and its keywords into the database
   * @param query - the query associated with a tweet
   * @param sentiment - the sentiment of a tweet: 1, 0, or -1
   * @param keywords - the keywords extracted from a tweet and their counts
   */
public void insertSentimentsAndKeywords(String query, int sentiment, HashMap<String, Integer> keywords) {

    HashMap<String, Integer> newPosWords = new HashMap<String, Integer>();
    HashMap<String, Integer> newNegWords = new HashMap<String, Integer>();
    int newPosCount = 0;
    int newNegCount = 0;
    if (sentiment == 1) {
        newPosWords = keywords;
        newPosCount = 1;
    }
    if (sentiment == -1) {
        newNegWords = keywords;
        newNegCount = 1;
    }

    FindIterable<Document> sentimentIterable = sentimentsCol.find(Filters.eq("query", query));
    Iterator<Document> sentimentIterator = sentimentIterable.iterator();

    if (sentimentIterator.hasNext()) {
        Document sentiDoc = sentimentIterator.next();
        Integer storedPosCount = sentiDoc.getInteger("posCount");
        Integer storedNegCount = sentiDoc.getInteger("negCount");
        Document storedPosWords = (Document) sentiDoc.get("poswords");
        Document storedNegWords = (Document) sentiDoc.get("negwords");
        for (String posword : newPosWords.keySet()) {
            Sentiment poSentiment = sentimentLexicon.analyze(posword);
            if (storedPosWords.containsKey(posword)) {

                if (poSentiment.getScore() >= 0) {
                    storedPosWords.put(posword,
                            ((Integer) storedPosWords.get(posword)) + newPosWords.get(posword));
                }
            } else {
                if (poSentiment.getScore() >= 0) {
                    storedPosWords.put(posword, newPosWords.get(posword));
                }
            }
        }

        for (String negword : newNegWords.keySet()) {
            Sentiment negSentiment = sentimentLexicon.analyze(negword);
            if (storedNegWords.containsKey(negword)) {
                if (negSentiment.getScore() >= 0) {
                    storedNegWords.put(negword,
                            ((Integer) storedNegWords.get(negword)) + newNegWords.get(negword));
                }
            } else {
                if (negSentiment.getScore() >= 0) {
                    storedNegWords.put(negword, newNegWords.get(negword));
                }
            }
        }

        Document updateDoc = new Document("posCount", storedPosCount + newPosCount)
                .append("negCount", storedNegCount + newNegCount).append("poswords", storedPosWords)
                .append("negwords", storedNegWords);
        sentimentsCol.updateOne(new Document("_id", sentiDoc.get("_id")), new Document("$set", updateDoc));
    } else {
        Document sentiDoc = new Document("query", query);
        sentiDoc.append("posCount", newPosCount);
        sentiDoc.append("negCount", newNegCount);
        sentiDoc.append("poswords", newPosWords);
        sentiDoc.append("negwords", newNegWords);
        sentimentsCol.insertOne(sentiDoc);
    }

}

From source file:com.project.shlok.TopicsAnalyzer.TwitterReader.java

/**
   * insertEntities
   * Inserts and updates the named entities extracted from a tweet into the database
   * @param query - the query associated with a tweet
   * @param entities - the entities extracted from a tweet and their counts
   */
public void insertEntities(String query, HashMap<String, Integer> entities) {
    FindIterable<Document> entityIterable = entitiesCol.find(Filters.eq("query", query));
    Iterator<Document> entityIterator = entityIterable.iterator();

    if (entityIterator.hasNext()) {
        Document entityDoc = entityIterator.next();
        Document storedEntityDoc = (Document) entityDoc.get("entities");
        for (String entity : entities.keySet()) {
            if (storedEntityDoc.containsKey(entity)) {
                Integer entityValue = (Integer) storedEntityDoc.get(entity);
                storedEntityDoc.put(entity, entityValue + entities.get(entity));
            } else {
                storedEntityDoc.put(entity, entities.get(entity));
            }
        }

        Document updateDoc = new Document("entities", storedEntityDoc);
        entitiesCol.updateOne(new Document("_id", entityDoc.get("_id")), new Document("$set", updateDoc));
    } else {
        Document newEntityDoc = new Document("query", query);
        newEntityDoc.append("entities", entities);
        entitiesCol.insertOne(newEntityDoc);

    }
}

From source file:com.project.shlok.TopicsAnalyzer.TwitterReader.java

/**
   * insertStories
   * Inserts and updates the trending stories extracted from a tweet into the database
   * @param query - the query associated with a tweet
   * @param storiesMap - a map of stories and their respective counts
   */
public void insertStories(String query, HashMap<String, Integer> storiesMap) {
    FindIterable<Document> storyIterable = storiesCol.find(Filters.eq("query", query));
    Iterator<Document> storyIterator = storyIterable.iterator();
    if (storyIterator.hasNext()) {
        if (!storiesMap.isEmpty()) {
            Document storyDoc = storyIterator.next();
            Document storyDocstories = (Document) storyDoc.get("stories");
            for (String key : storiesMap.keySet()) {
                storyDocstories.put(key, storiesMap.get(key));
            }

            Document updateDoc = new Document("stories", storyDocstories);
            storiesCol.updateOne(new Document("_id", storyDoc.get("_id")), new Document("$set", updateDoc));
        }
    } else {
        Document newStoryDoc = new Document("query", query);
        newStoryDoc.append("stories", storiesMap);
        storiesCol.insertOne(newStoryDoc);
    }
}

From source file:com.px100systems.data.plugin.storage.mongo.FilterQueryBuilder.java

License:Open Source License

@Override
public Bson convert(eq predicate) {
    return Filters.eq(predicate.getMember(), predicate.getValue());
}

From source file:com.px100systems.data.plugin.storage.mongo.MongoDatabaseStorage.java

License:Open Source License

@SuppressWarnings("unchecked")
public <T> T get(String unitName, Class<T> cls, Long id) {
    MongoDatabase db = mongoClient.getDatabase(databaseName);
    Document doc = db.getCollection(unitName).find(Filters.eq("id", id)).limit(1).first();
    if (doc == null)
        return null;

    SerializationDefinition def = SerializationDefinition.get(cls);
    if (def == null)
        throw new RuntimeException("Cannot find SerializationDefinition for " + cls.getSimpleName());

    T result = (T) def.newInstance();
    def.read(doc, result);
    return result;
}

From source file:com.px100systems.data.plugin.storage.mongo.MongoDatabaseStorage.java

License:Open Source License

private void batchSave(MongoDatabase db, List<StoredBean> inserts, List<StoredBean> updates,
        List<Delete> deletes) {
    Map<String, List<WriteModel<Document>>> batches = new HashMap<>();

    for (StoredBean bean : inserts) {
        String unitName = bean.unitName();
        List<WriteModel<Document>> batch = batches.get(unitName);
        if (batch == null) {
            batch = new ArrayList<>();
            batches.put(unitName, batch);
        }

        batch.add(new InsertOneModel<Document>(serialize(bean)));
    }

    for (StoredBean bean : updates) {
        String unitName = bean.unitName();
        List<WriteModel<Document>> batch = batches.get(unitName);
        if (batch == null) {
            batch = new ArrayList<>();
            batches.put(unitName, batch);
        }

        batch.add(new ReplaceOneModel<Document>(Filters.eq("id", bean.getId()), serialize(bean)));
    }

    for (Delete delete : deletes) {
        String unitName = delete.getUnitName();
        List<WriteModel<Document>> batch = batches.get(unitName);
        if (batch == null) {
            batch = new ArrayList<>();
            batches.put(unitName, batch);
        }

        batch.add(delete.getId() == null
                ? new DeleteManyModel<Document>(delete.getCriteria().convert(new FilterQueryBuilder()))
                : new DeleteOneModel<Document>(Filters.eq("id", delete.getId())));
    }

    for (Map.Entry<String, List<WriteModel<Document>>> e : batches.entrySet())
        db.getCollection(e.getKey()).bulkWrite(e.getValue());
}

From source file:com.streamsets.pipeline.stage.origin.mongodb.oplog.MongoDBOplogSource.java

License:Apache License

private void prepareCursor(int timestampSeconds, int ordinal, List<OplogOpType> filterOplogTypes,
        int batchSize) {
    LOG.debug("Getting new cursor with offset - TimeStampInSeconds:'{}', Ordinal : '{}' and Batch Size : '{}'",
            timestampSeconds, ordinal, batchSize);
    FindIterable<Document> mongoCursorIterable = mongoCollection.find()
            // The oplog is a capped collection, so a tailable cursor returns results
            // in natural order (here, ordered by the "ts" timestamp field).
            // TailableAwait would block without returning, so plain Tailable is used.
            .cursorType(CursorType.Tailable).batchSize(batchSize);

    List<Bson> andFilters = new ArrayList<>();
    // Only filter if a saved/initial offset was specified (i.e., time_t and ordinal are not -1).
    if (timestampSeconds > 0 && ordinal >= 0) {
        andFilters.add(Filters.gt(TIMESTAMP_FIELD, new BsonTimestamp(timestampSeconds, ordinal)));
    }

    if (!filterOplogTypes.isEmpty()) {
        List<Bson> oplogOptypeFilters = new ArrayList<>();
        Set<OplogOpType> oplogOpTypesSet = new HashSet<>();
        for (OplogOpType filterOplogopType : filterOplogTypes) {
            if (oplogOpTypesSet.add(filterOplogopType)) {
                oplogOptypeFilters.add(Filters.eq(OP_TYPE_FIELD, filterOplogopType.getOp()));
            }
        }
        // OR together the filters for the selected op types
        andFilters.add(Filters.or(oplogOptypeFilters));
    }
    // Finally, AND the timestamp filter together with the op type filters
    if (!andFilters.isEmpty()) {
        mongoCursorIterable = mongoCursorIterable.filter(Filters.and(andFilters));
    }
    cursor = mongoCursorIterable.iterator();
}

From source file:course.homework.Homework2_3.java

License:Apache License

public static void main(String[] args) {

    MongoClient client = new MongoClient();

    MongoDatabase database = client.getDatabase("students");
    final MongoCollection<Document> collection = database.getCollection("grades");

    Bson filter = Filters.eq("type", "homework");

    Bson sort = Sorts.descending("student_id", "score");

    Set<Object> keys = new HashSet<>();
    collection.find(filter).sort(sort).into(new ArrayList<Document>()).stream().forEach(doc -> {
        if (keys.contains(doc.get("student_id"))) {
            System.out.println("Already exists " + doc.get("student_id") + " = " + doc.get("score"));
            collection.deleteOne(Filters.eq("_id", doc.getObjectId("_id")));
        } else {
            System.out.println("Does not exist " + doc.get("student_id") + " = " + doc.get("score"));
            keys.add(doc.get("student_id"));
        }
    });
}