Example usage for io.vertx.core.json JsonObject containsKey

List of usage examples for io.vertx.core.json JsonObject containsKey

Introduction

On this page you can find example usage for io.vertx.core.json JsonObject#containsKey.

Prototype

public boolean containsKey(String key) 

Source Link

Document

Does the JSON object contain the specified key?

Usage

From source file:com.baldmountain.depot.models.Product.java

License:Open Source License

public Product(JsonObject json) {
    super("products", json);
    title = json.getString("title");
    description = json.getString("description");
    imageUrl = json.getString("imageUrl");
    if (json.containsKey("price")) {
        price = new BigDecimal(json.getDouble("price")).setScale(2, RoundingMode.CEILING);
    } else {/*from   ww w .  jav  a2s .  c  o  m*/
        price = BigDecimal.ZERO.setScale(2, RoundingMode.CEILING);
    }
}

From source file:com.cyngn.vertx.opentsdb.client.TsMetric.java

License:Apache License

/**
 * All metrics need a name and value field set.
 *
 * @param jsonObject the object to validate
 * @return true if metric has the minimum data false otherwise
 *///from   ww  w .  j  a  v a 2s.  c o  m
/**
 * Checks that the metric JSON carries the minimum required fields.
 *
 * @param jsonObject the candidate metric object to validate
 * @return true when both the name and value fields are present, false otherwise
 */
public static boolean isValid(JsonObject jsonObject) {
    boolean hasName = jsonObject.containsKey(MetricsParser.NAME_FIELD);
    boolean hasValue = jsonObject.containsKey(MetricsParser.VALUE_FIELD);
    return hasName && hasValue;
}

From source file:com.github.jackygurui.vertxredissonrepository.repository.Impl.RedisRepositoryImpl.java

License:Apache License

/**
 * Validates the raw JSON payload against the optional schema and, on success,
 * delegates creation to {@code createWithoutValidate}.
 *
 * A payload is accepted for creation only when schema validation passes AND it
 * does not already carry an "id" field (presumably ids are assigned by the
 * repository — TODO confirm against createWithoutValidate). Validation runs on
 * a worker thread via executeBlocking since JSON-schema processing can be slow.
 *
 * @param data          the raw JSON string to persist
 * @param redissonBatch the batch to append Redis operations to (may be null)
 * @param resultHandler receives the outcome, or the failure cause
 */
private void create(String data, RBatch redissonBatch, AsyncResultHandler<String> resultHandler) {
    vertx.<JsonObject>executeBlocking(f -> {
        JsonObject d = new JsonObject(data);
        // No schema configured: accept the payload without validation.
        if (schema == null && !f.isComplete()) {
            f.complete(d);
            return;
        }
        try {
            ProcessingReport validate = VALIDATOR.validate(schema, Json.mapper.readTree(data));
            // Reject payloads that pre-set "id"; every branch re-checks
            // isComplete() to avoid completing the future twice.
            if (validate.isSuccess() && !d.containsKey("id") && !f.isComplete()) {
                f.complete(d);
            } else if (!f.isComplete()) {
                f.fail(new RepositoryException("Invalid Data"));
            }
        } catch (ProcessingException | IOException ex) {
            if (!f.isComplete()) {
                f.fail(ex);
            }
        }
    }, result -> {
        if (result.succeeded()) {
            createWithoutValidate(result.result(), redissonBatch, resultHandler);
        } else {
            resultHandler.handle(Future.failedFuture(result.cause()));
        }
    });
}

From source file:com.github.jackygurui.vertxredissonrepository.repository.Impl.RedisRepositoryImpl.java

License:Apache License

/**
 * Validates the raw JSON payload against the optional schema and, on success,
 * delegates to {@code updateWithoutValidate}.
 *
 * An update is accepted only when schema validation passes AND the payload's
 * "id" field equals the id being updated — this prevents re-keying a record
 * through the update path. Validation runs on a worker thread.
 *
 * @param id            the id of the record being updated; must match data's "id"
 * @param data          the raw JSON string holding the new state
 * @param redissonBatch the batch to append Redis operations to (may be null)
 * @param resultHandler receives the outcome, or the failure cause
 */
private void update(String id, String data, RBatch redissonBatch, Handler<AsyncResult<Boolean>> resultHandler) {
    vertx.<JsonObject>executeBlocking(f -> {
        JsonObject d = new JsonObject(data);
        // No schema configured: accept the payload without validation.
        if (schema == null && !f.isComplete()) {
            f.complete(d);
            return;
        }
        try {
            ProcessingReport validate = VALIDATOR.validate(schema, Json.mapper.readTree(data));
            // The payload must carry the same id it is stored under; every
            // branch re-checks isComplete() to avoid double completion.
            if (id != null && validate.isSuccess() && d.containsKey("id") && id.equals(d.getString("id"))
                    && !f.isComplete()) {
                f.complete(d);
            } else if (!f.isComplete()) {
                f.fail(new RepositoryException("Invalid Data"));
            }
        } catch (ProcessingException | IOException ex) {
            if (!f.isComplete()) {
                f.fail(ex);
            }
        }
    }, result -> {
        if (result.succeeded()) {
            updateWithoutValidate(id, result.result(), redissonBatch, resultHandler);
        } else {
            resultHandler.handle(Future.failedFuture(result.cause()));
        }
    });
}

From source file:com.github.jackygurui.vertxredissonrepository.repository.Impl.RedisRepositoryImpl.java

License:Apache License

/**
 * Persists {@code data} under {@code id} by queueing operations on a Redisson
 * batch, recursing into nested Redis entities.
 *
 * Three paths are visible here:
 * 1) Redis entity with no nested entities: index maps are queued and the
 *    persist is finished immediately.
 * 2) Redis entity with nested entities: each nested object is persisted via
 *    its own repository; a stack counter plus the {@code finished} flag decide
 *    which callback completes the whole operation.
 * 3) Non-entity class: stored as a single JSON string.
 *
 * When {@code redissonBatch} is non-null this call is itself nested, so it
 * only reports success and leaves batch execution to the outermost caller.
 *
 * @param id            the key to store the record under
 * @param data          the record content
 * @param redissonBatch the enclosing batch, or null when this is the root call
 * @param resultHandler receives true on success or the failure cause exactly once
 */
private void persistBlocking(String id, JsonObject data, RBatch redissonBatch,
        Handler<AsyncResult<Boolean>> resultHandler) {
    // Reuse the caller's batch when nested, otherwise start a fresh one.
    RBatch batch = redissonBatch == null ? redissonWrite.createBatch() : redissonBatch;
    // Ensures the result handler fires at most once when a nested persist fails.
    AtomicBoolean failed = new AtomicBoolean(false);
    try {
        BeanMap pMap = new BeanMap(cls.newInstance());
        //remove the indexes;
        if (isRedisEntity()) {
            // finished: set after the forEach loop has queued everything.
            AtomicBoolean finished = new AtomicBoolean(false);
            // hasNested: whether any property is itself a Redis entity.
            AtomicBoolean hasNested = new AtomicBoolean(false);
            // stack: count of nested persists still in flight.
            AtomicLong stack = new AtomicLong();
            pMap.forEach((k, v) -> {
                if ("class".equals(k)) {
                    return;
                }
                Class<?> type = pMap.getType((String) k);
                if (!isRedisEntity(type)) {
                    //recreate the indexes;
                    if ("id".equals(k)) {
                        batch.getMap(getStorageKey(), StringCodec.INSTANCE).fastPutAsync(id, id);
                    } else {
                        batch.getMap(getStorageKey((String) k)).fastPutAsync(id, data.getValue((String) k));
                    }
                } else {
                    hasNested.set(true);
                    stack.incrementAndGet();
                    RedisRepositoryImpl<?> innerRepo;
                    try {
                        innerRepo = (RedisRepositoryImpl) factory.instance(type);
                    } catch (RepositoryException e) {
                        throw new RuntimeException(e);
                    }
                    JsonObject value = data.getJsonObject((String) k);
                    // A nested object without a usable id is "new" and inherits
                    // the parent's id (1:1 relation), making later fetches
                    // resolvable without an extra reference lookup.
                    final boolean newOne = !value.containsKey("id") || value.getString("id") == null
                            || "null".equals(value.getString("id"));
                    final String ID = newOne ? id : value.getString("id");
                    innerRepo.persist(ID, value, batch, c -> {//making the nested entity shares the same id as the parent when its 1:1 relation. This makes fetch a lot faster since it doesn't not need to resolve the reference when fetching 1:1 nested objects.
                        if (c.succeeded()) {
                            long s = stack.decrementAndGet();
                            if (newOne) {
                                batch.getMap(getStorageKey((String) k)).fastPutAsync(id, ID);//different to the update, create needs to add the reference field to batch
                            }
                            if (s == 0 && finished.get() && !failed.get()) { //finished iterating and no outstanding processes. 
                                if (redissonBatch == null) {//if it's not inside a nested process.
                                    finishPersist(id, data, batch, resultHandler);
                                } else {//if it is inside a nested process.
                                    resultHandler.handle(Future.succeededFuture(true));
                                }
                            }
                            //else wait for others to complete
                        } else {
                            // Only the first failure reports; later ones are dropped.
                            boolean firstToFail = failed.compareAndSet(false, true);
                            if (firstToFail) {
                                resultHandler.handle(Future.failedFuture(c.cause()));
                            }
                        }
                    });
                }
            });
            batch.getAtomicLongAsync(getCounterKey()).incrementAndGetAsync();
            finished.set(true);
            if (!hasNested.get()) {//does not have nested RedissonEntity within
                if (redissonBatch == null) {//if it's not inside a nested process.
                    finishPersist(id, data, batch, resultHandler);
                } else {//if it is inside a nested process.
                    resultHandler.handle(Future.succeededFuture(true));
                }
            }
        } else {//not a RedissonEntity class, persist as json string.
            //recreate the indexes;
            batch.<String, String>getMap(getStorageKey(), StringCodec.INSTANCE).fastPutAsync(id,
                    Json.encode(data));
            batch.getAtomicLongAsync(getCounterKey()).incrementAndGetAsync();
            if (redissonBatch == null) {//if it's not inside a nested process.
                finishPersist(id, data, batch, resultHandler);
            } else {//if it is inside a nested process.
                resultHandler.handle(Future.succeededFuture(true));
            }
        }
    } catch (InstantiationException | IllegalAccessException | RuntimeException ex) {
        failed.set(true);
        resultHandler.handle(Future.failedFuture(ex));
    }
}

From source file:com.hpe.sw.cms.verticle.MongoStoreVerticle.java

License:Apache License

/**
 * Starts the verticle: connects the shared Mongo client and registers the
 * event-bus consumers that back the image store (queries, scan/enrich file
 * persistence, and new-image upserts).
 *
 * @throws Exception propagated from the superclass start
 */
@Override
public void start() throws Exception {
    super.start();
    client = MongoClient.createShared(vertx, config().getJsonObject("mongo"));

    // Query images, optionally filtered by timestamp or imageid. Unless the
    // caller asks for include=all, restrict to documents that have an imageId.
    vertx.eventBus().consumer(Events.GET_IMAGES.name(), msg -> {
        JsonObject param = (JsonObject) msg.body();
        JsonObject query = new JsonObject();
        if (param != null && param.getString("timestamp") != null) {
            Long timestamp = Long.parseLong(param.getString("timestamp"));
            query.put(Image.TIMESTAMP, new JsonObject().put("$gte", timestamp));
        } else if (param != null && param.getString("imageid") != null) {
            query.put(Image.IMAGE_ID, param.getString(Image.IMAGE_ID));
        }

        if (!query.containsKey(Image.IMAGE_ID) && (param == null || param.getString("include") == null
                || !"all".equals(param.getString("include")))) {
            query.put(Image.IMAGE_ID, new JsonObject().put("$exists", true));
        }
        JsonArray images = new JsonArray();
        client.find("images", query, res -> {
            // NOTE(review): find failures are silently ignored here and in the
            // consumers below — the caller never gets a reply on error.
            if (res.succeeded()) {
                List<JsonObject> result = res.result();
                for (JsonObject dbImage : result) {
                    images.add(Image.cloneImage(dbImage));
                }
                msg.reply(images);
            }
        });
    });

    // Look up a single image document for download; replies null when absent.
    vertx.eventBus().consumer(Events.DOWNLOAD_FILE.name(), msg -> {
        JsonObject query = (JsonObject) msg.body();
        LOG.debug("DOWNLOAD_FILE query is " + query);
        client.find("images", query, res -> {
            if (res.succeeded()) {
                List<JsonObject> result = res.result();
                LOG.debug("DOWNLOAD_FILE result is " + result.size());
                if (result.size() > 0) {
                    msg.reply(result.get(0));
                } else {
                    msg.reply(null);
                }
            }
        });
    });

    // Return up to fetchSize not-yet-scanned images, newest first.
    vertx.eventBus().consumer(Events.IMAGES_UPDATED.name(), msg -> {
        JsonArray updates = new JsonArray();
        JsonObject query = new JsonObject();
        query.put(Image.IS_SCANNED, false);
        int fetchSize = Integer.valueOf(String.valueOf(msg.body()));
        FindOptions options = new FindOptions();
        JsonObject sort = new JsonObject();
        sort.put(Image.TIMESTAMP, -1);
        options.setLimit(fetchSize).setSort(sort);
        client.findWithOptions("images", query, options, res -> {
            if (res.succeeded()) {
                List<JsonObject> result = res.result();
                for (JsonObject update : result) {
                    updates.add(update);
                    LOG.debug("get image from DB :" + Image.getImageKey(update));
                }
                LOG.debug("IMAGES_UPDATED reply updates size " + updates.size());
                msg.reply(updates);
            }
        });
    });

    // Persist a scan result (success or failure) onto the matching image docs.
    vertx.eventBus().consumer(Events.SCANFILE_UPLOADED.name(), msg -> {
        JsonObject upFile = (JsonObject) msg.body();
        JsonObject query = new JsonObject();
        query.put(Image.HOST, upFile.getString(Image.HOST)).put(Image.NAME, upFile.getString(Image.NAME))
                .put(Image.TAG, upFile.getString(Image.TAG));
        client.find("images", query, res -> {
            if (res.succeeded()) {
                List<JsonObject> result = res.result();
                if (result.size() == 0) {
                    LOG.error("no mapped image in DB for " + Image.getImageKey(upFile));
                    return;
                }
                for (JsonObject dbImage : result) {
                    if (upFile.getBoolean("isScanFailed")) {
                        //Failed in scanning.
                        LOG.info("store failed scan to DB " + Image.getImageKey(upFile));
                        dbImage.put(Image.IS_SCANNED, true);
                        dbImage.put(Image.IS_SCANNED_FAILED, true);
                    } else {
                        //successfully in scanning.
                        LOG.info("store scanfile to DB " + Image.getImageKey(upFile));
                        dbImage.put(Image.IS_SCANNED, true);
                        dbImage.put(Image.IS_SCANNED_FAILED, false);
                        dbImage.put(Image.IMAGE_ID, upFile.getString(Image.IMAGE_ID));
                        dbImage.put(Image.SCANNED_FILE, upFile.getBinary(Image.SCANNED_FILE));
                    }
                    client.save("images", dbImage, h -> {
                        if (h.succeeded()) {
                            LOG.info("SCANFILE_UPLOADED:Image " + Image.getImageKey(dbImage) + " updated !");
                        } else {
                            // Log through the verticle logger instead of printStackTrace.
                            LOG.error("SCANFILE_UPLOADED:failed to save image", h.cause());
                        }
                    });
                }
            }
        });
    });

    // Attach uploaded enrichment files to their image documents by imageId.
    vertx.eventBus().consumer(Events.ENRICHFILE_UPLOADED.name(), msg -> {
        JsonArray upFiles = (JsonArray) msg.body();
        for (Object upFileObj : upFiles) {
            JsonObject upFile = (JsonObject) upFileObj;
            if (upFile.getBinary("enrichedFile") == null) {
                LOG.info("enrichedFile is empty for " + upFile.getString("imageid"));
                continue;
            }
            LOG.info("store enrichfile to DB " + upFile.getString("imageid"));
            JsonObject query = new JsonObject();
            query.put(Image.IMAGE_ID, upFile.getString(Image.IMAGE_ID));
            client.find("images", query, res -> {
                if (res.succeeded()) {
                    List<JsonObject> result = res.result();
                    for (JsonObject dbImage : result) {
                        dbImage.put(Image.IS_ENRICHED, true);
                        dbImage.put(Image.ENRICHED_FILE, upFile.getBinary(Image.ENRICHED_FILE));
                        client.save("images", dbImage, h -> {
                            if (h.succeeded()) {
                                LOG.info("ENRICHFILE_UPLOADED:Image " + Image.getImageKey(dbImage)
                                        + " updated !");
                            } else {
                                LOG.error("ENRICHFILE_UPLOADED:failed to save image", h.cause());
                            }
                        });
                    }
                }
            });
        }
    });

    // List images that were scanned successfully but not yet enriched.
    vertx.eventBus().consumer(Events.IMAGE_TO_ENRICH.name(), msg -> {
        JsonObject query = new JsonObject();
        query.put(Image.IS_SCANNED, true).put(Image.IS_SCANNED_FAILED, false).put(Image.IS_ENRICHED, false);
        client.find("images", query, res -> {
            if (res.succeeded()) {
                List<JsonObject> result = res.result();
                msg.reply(new JsonArray(result));
            }
        });
    });

    // Upsert an incoming image keyed by (host, name, tag): insert when new,
    // refresh when the signature changed, skip when identical.
    vertx.eventBus().consumer(Events.NEW_IMAGE.name(), msg -> {
        //to store events in
        JsonObject obj = (JsonObject) msg.body();
        JsonObject query = new JsonObject();
        query.put(Image.HOST, obj.getString(Image.HOST)).put(Image.NAME, obj.getString(Image.NAME))
                .put(Image.TAG, obj.getString(Image.TAG));
        client.find("images", query, res -> {
            if (res.succeeded()) {
                List<JsonObject> result = res.result();
                if (result.isEmpty()) {
                    //inserted
                    client.insert("images", obj, h -> {
                        if (h.succeeded()) {
                            LOG.info("IMAGES_COMMING :Image " + Image.getImageKey(obj) + " inserted !");
                        } else {
                            LOG.error("IMAGES_COMMING :failed to insert image", h.cause());
                        }
                    });
                } else if (result.size() == 1) {
                    JsonObject toUpdate = result.get(0);
                    if (!obj.getString(Image.SIGN).equals(toUpdate.getString(Image.SIGN))) {
                        toUpdate.put(Image.TIMESTAMP, obj.getLong(Image.TIMESTAMP))
                                .put(Image.SIGN, obj.getString(Image.SIGN))
                                .put(Image.IS_SCANNED, obj.getBoolean(Image.IS_SCANNED))
                                .put(Image.IS_ENRICHED, obj.getBoolean(Image.IS_ENRICHED));
                        //saved
                        client.save("images", toUpdate, h -> {
                            if (h.succeeded()) {
                                LOG.info("IMAGES_COMMING :Image " + Image.getImageKey(obj) + " updated !");
                            } else {
                                LOG.error("IMAGES_COMMING :failed to save image", h.cause());
                            }
                        });
                    } else {
                        LOG.info("IMAGES_COMMING :Image " + Image.getImageKey(obj)
                                + " has the same sign with the coming image, so will not update to DB !");
                    }
                } else {
                    // More than one document for a unique key is a data error.
                    throw new RuntimeException(
                            "IMAGES_COMMING :Found " + result.size() + " image for " + Image.getImageKey(obj));
                }
            }
        });
    });
}

From source file:com.themonkee.vertx.web.impl.MongoSessionStoreImpl.java

License:Open Source License

/**
 * Creates the Mongo-backed session store: overrides the session collection
 * name from {@code options} when supplied, ensures the collection exists, and
 * then creates the TTL index that expires session documents.
 *
 * @param vertx         the Vert.x instance (also seeds the PRNG)
 * @param mongoClient   the Mongo client used for all session operations
 * @param options       optional config; key "collection" overrides the collection name
 * @param resultHandler completed with this store once the index exists, or failed
 */
public MongoSessionStoreImpl(Vertx vertx, MongoClient mongoClient, JsonObject options,
        Future<MongoSessionStore> resultHandler) {
    this.random = new PRNG(vertx);
    this.vertx = vertx;
    this.mongoClient = mongoClient;
    if (options != null) {
        if (options.containsKey("collection"))
            this.sessionCollection = options.getString("collection");
    }

    Future<Void> futCreateColl = Future.future();
    // try to create collection, if it is created or already exists its OK
    this.mongoClient.createCollection(this.sessionCollection, (AsyncResult<Void> res) -> {
        if (res.succeeded()) {
            futCreateColl.complete();
        } else {
            // Throwable.getMessage() may be null — guard before contains()
            // to avoid replacing the real cause with a NullPointerException.
            String causeMessage = res.cause().getMessage();
            if (causeMessage != null && causeMessage.contains("collection already exists")) {
                futCreateColl.complete();
            } else {
                futCreateColl.fail(res.cause());
            }
        }
    });

    futCreateColl.compose(v -> {
        // create the session expiry index
        // SessionImpl sets _expire field to Date when session document must be deleted on save
        // so we set expireAfterSeconds to 0 so its deleted when that Date is hit
        // see https://docs.mongodb.com/manual/tutorial/expire-data/
        this.mongoClient.createIndexWithOptions(this.sessionCollection,
                new JsonObject().put(SessionImpl.EXPIRE_FIELD, 1),
                new IndexOptions().expireAfter(0L, TimeUnit.SECONDS), res -> {
                    if (res.succeeded()) {
                        resultHandler.complete(this);
                    } else {
                        resultHandler.fail(res.cause());
                    }
                });
    }, resultHandler);
}

From source file:eu.rethink.mn.pipeline.handlers.ValidatorPipeHandler.java

License:Apache License

@Override
public void handle(PipeContext ctx) {
    final PipeMessage msg = ctx.getMessage();

    //header validation...
    final JsonObject json = msg.getJson();

    if (!json.containsKey("id")) {
        ctx.fail(NAME, "No mandatory field 'id'");
    }/*from w w  w. j a  v a  2s.c o m*/

    if (!json.containsKey("type")) {
        ctx.fail(NAME, "No mandatory field 'type'");
    }

    final String from = json.getString("from");
    if (from == null) {
        ctx.fail(NAME, "No mandatory field 'from'");
    }

    final String to = json.getString("to");
    if (to == null) {
        ctx.fail(NAME, "No mandatory field 'to'");
    }

    ctx.next();
}

From source file:fr.wseduc.rack.controllers.RackController.java

License:Open Source License

/**
 * Deletes the rack entry identified by the "id" request parameter: removes the
 * stored file, then the rack document, adjusts the owner's quota by the file
 * size, and finally removes any thumbnail files.
 *
 * Thumbnail removal is best-effort (failures are only logged) and runs
 * regardless of whether the main file removal succeeded.
 *
 * @param request the HTTP request; replies 204 on success, 400 on errors
 * @param owner   the user whose storage quota is credited back
 */
private void deleteFile(final HttpServerRequest request, final String owner) {
    final String id = request.params().get("id");
    rackService.getRack(id, new Handler<Either<String, JsonObject>>() {
        @Override
        public void handle(Either<String, JsonObject> event) {
            if (event.isRight()) {
                final JsonObject result = event.right().getValue();

                String file = result.getString("file");
                // Collect thumbnail ids up front; they are deleted after the
                // main removal below.
                Set<Entry<String, Object>> thumbnails = new HashSet<Entry<String, Object>>();
                if (result.containsKey("thumbnails")) {
                    thumbnails = result.getJsonObject("thumbnails").getMap().entrySet();
                }

                storage.removeFile(file, new Handler<JsonObject>() {
                    @Override
                    public void handle(JsonObject event) {
                        if (event != null && "ok".equals(event.getString("status"))) {
                            rackService.deleteRack(id, new Handler<Either<String, JsonObject>>() {
                                @Override
                                public void handle(Either<String, JsonObject> deletionEvent) {
                                    if (deletionEvent.isRight()) {
                                        JsonObject deletionResult = deletionEvent.right().getValue();
                                        // Negative delta credits the freed bytes back to the owner.
                                        long size = -1l * result.getJsonObject("metadata", new JsonObject())
                                                .getLong("size", 0l);
                                        updateUserQuota(owner, size);
                                        renderJson(request, deletionResult, 204);
                                    } else {
                                        badRequest(request, deletionEvent.left().getValue());
                                    }
                                }
                            });
                        } else {
                            renderError(request, event);
                        }
                    }
                });

                //Delete thumbnails
                for (final Entry<String, Object> thumbnail : thumbnails) {
                    storage.removeFile(thumbnail.getValue().toString(), new Handler<JsonObject>() {
                        @Override
                        public void handle(JsonObject event) {
                            if (event == null || !"ok".equals(event.getString("status"))) {
                                logger.error("[gridfsRemoveFile] Error while deleting thumbnail " + thumbnail);
                            }
                        }
                    });
                }

            } else {
                JsonObject error = new JsonObject().put("error", event.left().getValue());
                Renders.renderJson(request, error, 400);
            }
        }
    });
}

From source file:io.apiman.gateway.engine.vertx.polling.PolicyConfigLoader.java

License:Apache License

@SuppressWarnings("unchecked")
private <T, K> List<T> requireJsonArray(String keyName, JsonObject json, Class<K> klazz) {
    // Contains key.
    Arguments.require(json.containsKey(keyName), String.format("Must provide array of %s objects for key '%s'",
            StringUtils.capitalize(keyName), keyName));
    // Is of type array.
    Arguments.require(json.getValue(keyName) instanceof JsonArray,
            String.format("'%s' must be a Json array", keyName));
    // Transform into List<T>.
    return Json.decodeValue(json.getJsonArray(keyName).encode(), List.class, klazz);
}