Example usage for org.json.simple JSONValue toJSONString

List of usage examples for org.json.simple JSONValue toJSONString

Introduction

On this page you can find example usages of org.json.simple JSONValue.toJSONString.

Prototype

public static String toJSONString(Object value) 
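
For a quick orientation before the real-world usages below, here is a minimal, self-contained sketch (class name and values are illustrative only). Maps are rendered as JSON objects, lists as JSON arrays, strings are quoted and escaped, and null becomes the JSON literal null:

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;

import org.json.simple.JSONValue;

public class ToJSONStringDemo {
    public static void main(String[] args) {
        Map<String, Object> doc = new LinkedHashMap<>();
        doc.put("name", "storm");
        doc.put("workers", 4);
        doc.put("tags", Arrays.asList("streaming", "realtime"));
        doc.put("owner", null);

        String json = JSONValue.toJSONString(doc);
        System.out.println(json);
        // expected output (LinkedHashMap preserves insertion order):
        // {"name":"storm","workers":4,"tags":["streaming","realtime"],"owner":null}
    }
}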

Usage

From source file:org.apache.falcon.workflow.WorkflowExecutionContext.java

/**
 * This method is invoked from within the workflow.
 *
 * @param contextFile file to serialize the workflow execution metadata
 * @throws org.apache.falcon.FalconException
 */
public void serialize(String contextFile) throws FalconException {
    LOG.info("Saving context to: [{}]", contextFile);
    OutputStream out = null;
    Path file = new Path(contextFile);
    try {
        FileSystem fs = actionJobConf == null ? HadoopClientFactory.get().createProxiedFileSystem(file.toUri())
                : HadoopClientFactory.get().createProxiedFileSystem(file.toUri(), actionJobConf);
        out = fs.create(file);
        out.write(JSONValue.toJSONString(context).getBytes());
    } catch (IOException e) {
        throw new FalconException("Error serializing context to: " + contextFile, e);
    } finally {
        if (out != null) {
            try {
                out.close();
            } catch (IOException ignore) {
                // ignore
            }
        }
    }
}
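
Not part of the Falcon class above, but a plausible counterpart for reading the serialized context back might look like the following sketch (class name, method name, and generic types are assumptions for illustration):

import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.StandardCharsets;
import java.util.Map;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.json.simple.JSONValue;

public class WorkflowContextReader {

    /** Hypothetical reverse of serialize(): parse the JSON written above back into a map. */
    @SuppressWarnings("unchecked")
    public static Map<String, String> deserialize(FileSystem fs, String contextFile) throws Exception {
        Path file = new Path(contextFile);
        try (Reader in = new InputStreamReader(fs.open(file), StandardCharsets.UTF_8)) {
            // JSONValue.parse returns a Map implementation for a top-level JSON object
            return (Map<String, String>) JSONValue.parse(in);
        }
    }
}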

From source file:org.apache.storm.daemon.StormCommon.java

@SuppressWarnings("unchecked")
public static void addAcker(Map<String, Object> conf, StormTopology topology) {
    int ackerNum = ObjectReader.getInt(conf.get(Config.TOPOLOGY_ACKER_EXECUTORS),
            ObjectReader.getInt(conf.get(Config.TOPOLOGY_WORKERS)));
    Map<GlobalStreamId, Grouping> inputs = ackerInputs(topology);

    Map<String, StreamInfo> outputStreams = new HashMap<String, StreamInfo>();
    outputStreams.put(Acker.ACKER_ACK_STREAM_ID,
            Thrift.directOutputFields(Arrays.asList("id", "time-delta-ms")));
    outputStreams.put(Acker.ACKER_FAIL_STREAM_ID,
            Thrift.directOutputFields(Arrays.asList("id", "time-delta-ms")));
    outputStreams.put(Acker.ACKER_RESET_TIMEOUT_STREAM_ID,
            Thrift.directOutputFields(Arrays.asList("id", "time-delta-ms")));

    Map<String, Object> ackerConf = new HashMap<>();
    ackerConf.put(Config.TOPOLOGY_TASKS, ackerNum);
    ackerConf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS,
            ObjectReader.getInt(conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)));

    Bolt acker = Thrift.prepareSerializedBoltDetails(inputs, makeAckerBolt(), outputStreams, ackerNum,
            ackerConf);

    for (Bolt bolt : topology.get_bolts().values()) {
        ComponentCommon common = bolt.get_common();
        common.put_to_streams(Acker.ACKER_ACK_STREAM_ID, Thrift.outputFields(Arrays.asList("id", "ack-val")));
        common.put_to_streams(Acker.ACKER_FAIL_STREAM_ID, Thrift.outputFields(Arrays.asList("id")));
        common.put_to_streams(Acker.ACKER_RESET_TIMEOUT_STREAM_ID, Thrift.outputFields(Arrays.asList("id")));
    }

    for (SpoutSpec spout : topology.get_spouts().values()) {
        ComponentCommon common = spout.get_common();
        Map spoutConf = componentConf(spout);
        spoutConf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS,
                ObjectReader.getInt(conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)));
        common.set_json_conf(JSONValue.toJSONString(spoutConf));
        common.put_to_streams(Acker.ACKER_INIT_STREAM_ID,
                Thrift.outputFields(Arrays.asList("id", "init-val", "spout-task")));
        common.put_to_inputs(Utils.getGlobalStreamId(Acker.ACKER_COMPONENT_ID, Acker.ACKER_ACK_STREAM_ID),
                Thrift.prepareDirectGrouping());
        common.put_to_inputs(Utils.getGlobalStreamId(Acker.ACKER_COMPONENT_ID, Acker.ACKER_FAIL_STREAM_ID),
                Thrift.prepareDirectGrouping());
        common.put_to_inputs(
                Utils.getGlobalStreamId(Acker.ACKER_COMPONENT_ID, Acker.ACKER_RESET_TIMEOUT_STREAM_ID),
                Thrift.prepareDirectGrouping());
    }

    topology.put_to_bolts(Acker.ACKER_COMPONENT_ID, acker);
}

From source file:org.apache.storm.kafka.monitor.KafkaOffsetLagUtil.java

public static void main(String args[]) {
    try {
        List<KafkaOffsetLagResult> results;
        Options options = buildOptions();
        CommandLineParser parser = new DefaultParser();
        CommandLine commandLine = parser.parse(options, args);
        if (!commandLine.hasOption(OPTION_TOPIC_LONG)) {
            printUsageAndExit(options, OPTION_TOPIC_LONG + " is required");
        }
        if (commandLine.hasOption(OPTION_OLD_CONSUMER_LONG)) {
            OldKafkaSpoutOffsetQuery oldKafkaSpoutOffsetQuery;
            if (commandLine.hasOption(OPTION_GROUP_ID_LONG)
                    || commandLine.hasOption(OPTION_BOOTSTRAP_BROKERS_LONG)
                    || commandLine.hasOption(OPTION_SECURITY_PROTOCOL_LONG)) {
                printUsageAndExit(options,
                        OPTION_GROUP_ID_LONG + " or " + OPTION_BOOTSTRAP_BROKERS_LONG + " or "
                                + OPTION_SECURITY_PROTOCOL_LONG + " is " + "not accepted with option "
                                + OPTION_OLD_CONSUMER_LONG);
            }
            if (!commandLine.hasOption(OPTION_ZK_SERVERS_LONG)
                    || !commandLine.hasOption(OPTION_ZK_COMMITTED_NODE_LONG)) {
                printUsageAndExit(options, OPTION_ZK_SERVERS_LONG + " and " + OPTION_ZK_COMMITTED_NODE_LONG
                        + " are required  with " + OPTION_OLD_CONSUMER_LONG);
            }
            String[] topics = commandLine.getOptionValue(OPTION_TOPIC_LONG).split(",");
            if (topics != null && topics.length > 1) {
                printUsageAndExit(options,
                        "Multiple topics not supported with option " + OPTION_OLD_CONSUMER_LONG
                                + ". Either a single topic or a "
                                + "wildcard string for matching topics is supported");
            }
            if (commandLine.hasOption(OPTION_ZK_BROKERS_ROOT_LONG)) {
                if (commandLine.hasOption(OPTION_PARTITIONS_LONG)
                        || commandLine.hasOption(OPTION_LEADERS_LONG)) {
                    printUsageAndExit(options, OPTION_PARTITIONS_LONG + " or " + OPTION_LEADERS_LONG
                            + " is not accepted with " + OPTION_ZK_BROKERS_ROOT_LONG);
                }
                oldKafkaSpoutOffsetQuery = new OldKafkaSpoutOffsetQuery(
                        commandLine.getOptionValue(OPTION_TOPIC_LONG),
                        commandLine.getOptionValue(OPTION_ZK_SERVERS_LONG),
                        commandLine.getOptionValue(OPTION_ZK_COMMITTED_NODE_LONG),
                        commandLine.hasOption(OPTION_TOPIC_WILDCARD_LONG),
                        commandLine.getOptionValue(OPTION_ZK_BROKERS_ROOT_LONG));
            } else {
                if (commandLine.hasOption(OPTION_TOPIC_WILDCARD_LONG)) {
                    printUsageAndExit(options, OPTION_TOPIC_WILDCARD_LONG + " is not supported without "
                            + OPTION_ZK_BROKERS_ROOT_LONG);
                }
                if (!commandLine.hasOption(OPTION_PARTITIONS_LONG)
                        || !commandLine.hasOption(OPTION_LEADERS_LONG)) {
                    printUsageAndExit(options, OPTION_PARTITIONS_LONG + " and " + OPTION_LEADERS_LONG
                            + " are required if " + OPTION_ZK_BROKERS_ROOT_LONG + " is not provided");
                }
                String[] partitions = commandLine.getOptionValue(OPTION_PARTITIONS_LONG).split(",");
                String[] leaders = commandLine.getOptionValue(OPTION_LEADERS_LONG).split(",");
                if (partitions.length != leaders.length) {
                    printUsageAndExit(options, OPTION_PARTITIONS_LONG + " and " + OPTION_LEADERS_LONG
                            + " need to be of same size");
                }
                oldKafkaSpoutOffsetQuery = new OldKafkaSpoutOffsetQuery(
                        commandLine.getOptionValue(OPTION_TOPIC_LONG),
                        commandLine.getOptionValue(OPTION_ZK_SERVERS_LONG),
                        commandLine.getOptionValue(OPTION_ZK_COMMITTED_NODE_LONG),
                        commandLine.getOptionValue(OPTION_PARTITIONS_LONG),
                        commandLine.getOptionValue(OPTION_LEADERS_LONG));
            }
            results = getOffsetLags(oldKafkaSpoutOffsetQuery);
        } else {
            String securityProtocol = commandLine.getOptionValue(OPTION_SECURITY_PROTOCOL_LONG);
            String[] oldSpoutOptions = { OPTION_TOPIC_WILDCARD_LONG, OPTION_PARTITIONS_LONG,
                    OPTION_LEADERS_LONG, OPTION_ZK_SERVERS_LONG, OPTION_ZK_COMMITTED_NODE_LONG,
                    OPTION_ZK_BROKERS_ROOT_LONG };
            for (String oldOption : oldSpoutOptions) {
                if (commandLine.hasOption(oldOption)) {
                    printUsageAndExit(options,
                            oldOption + " is not accepted without " + OPTION_OLD_CONSUMER_LONG);
                }
            }
            if (!commandLine.hasOption(OPTION_GROUP_ID_LONG)
                    || !commandLine.hasOption(OPTION_BOOTSTRAP_BROKERS_LONG)) {
                printUsageAndExit(options, OPTION_GROUP_ID_LONG + " and " + OPTION_BOOTSTRAP_BROKERS_LONG
                        + " are required if " + OPTION_OLD_CONSUMER_LONG + " is not specified");
            }
            NewKafkaSpoutOffsetQuery newKafkaSpoutOffsetQuery = new NewKafkaSpoutOffsetQuery(
                    commandLine.getOptionValue(OPTION_TOPIC_LONG),
                    commandLine.getOptionValue(OPTION_BOOTSTRAP_BROKERS_LONG),
                    commandLine.getOptionValue(OPTION_GROUP_ID_LONG), securityProtocol);
            results = getOffsetLags(newKafkaSpoutOffsetQuery);
        }

        Map<String, Map<Integer, KafkaPartitionOffsetLag>> keyedResult = keyByTopicAndPartition(results);
        System.out.print(JSONValue.toJSONString(keyedResult));
    } catch (Exception ex) {
        System.out.print("Unable to get offset lags for kafka. Reason: ");
        ex.printStackTrace(System.out);
    }
}

From source file:org.apache.storm.kafka.ZkState.java

public void writeJSON(String path, Map<Object, Object> data) {
    LOG.debug("Writing {} the data {}", path, data.toString());
    writeBytes(path, JSONValue.toJSONString(data).getBytes(Charset.forName("UTF-8")));
}
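
The write above pairs naturally with parsing the same bytes back; a standalone round-trip sketch (keys and values are arbitrary):

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

import org.json.simple.JSONValue;

public class JsonBytesRoundTrip {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        Map<Object, Object> data = new HashMap<>();
        data.put("topic", "events");
        data.put("offset", 42L);

        // same encode step ZkState uses before handing bytes to ZooKeeper
        byte[] payload = JSONValue.toJSONString(data).getBytes(StandardCharsets.UTF_8);

        // decoding the payload recovers an equivalent map (JSON integers parse back as Long)
        Map<Object, Object> restored =
                (Map<Object, Object>) JSONValue.parse(new String(payload, StandardCharsets.UTF_8));
        System.out.println(restored.equals(data)); // true
    }
}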

From source file:org.apache.storm.StormSubmitter.java

/**
 * Submits a topology to run on the cluster as a particular user. A topology runs forever or until
 * explicitly killed.
 *
 * @param name the name to give the topology
 * @param topoConf the topology-specific configuration
 * @param topology the processing to execute
 * @param opts options controlling how the topology is submitted
 * @param progressListener to track the progress of the jar upload process
 * @param asUser The user as which this topology should be submitted.
 * @throws AlreadyAliveException
 * @throws InvalidTopologyException
 * @throws AuthorizationException
 * @throws IllegalArgumentException thrown if the configs would yield an unschedulable topology (validateConfs validates the confs)
 * @throws SubmitterHookException if any Exception occurs during initialization or invocation of registered {@link ISubmitterHook}
 */
public static void submitTopologyAs(String name, Map<String, Object> topoConf, StormTopology topology,
        SubmitOptions opts, ProgressListener progressListener, String asUser) throws AlreadyAliveException,
        InvalidTopologyException, AuthorizationException, IllegalArgumentException {
    if (!Utils.isValidConf(topoConf)) {
        throw new IllegalArgumentException("Storm conf is not valid. Must be json-serializable");
    }
    topoConf = new HashMap(topoConf);
    topoConf.putAll(Utils.readCommandLineOpts());
    Map<String, Object> conf = Utils.readStormConfig();
    conf.putAll(topoConf);
    topoConf.putAll(prepareZookeeperAuthentication(conf));

    validateConfs(conf, topology);

    Map<String, String> passedCreds = new HashMap<>();
    if (opts != null) {
        Credentials tmpCreds = opts.get_creds();
        if (tmpCreds != null) {
            passedCreds = tmpCreds.get_creds();
        }
    }
    Map<String, String> fullCreds = populateCredentials(conf, passedCreds);
    if (!fullCreds.isEmpty()) {
        if (opts == null) {
            opts = new SubmitOptions(TopologyInitialStatus.ACTIVE);
        }
        opts.set_creds(new Credentials(fullCreds));
    }
    try {
        if (localNimbus != null) {
            LOG.info("Submitting topology " + name + " in local mode");
            if (opts != null) {
                localNimbus.submitTopologyWithOpts(name, topoConf, topology, opts);
            } else {
                // this is for backwards compatibility
                localNimbus.submitTopology(name, topoConf, topology);
            }
            LOG.info("Finished submitting topology: " + name);
        } else {
            String serConf = JSONValue.toJSONString(topoConf);
            try (NimbusClient client = NimbusClient.getConfiguredClientAs(conf, asUser)) {
                if (topologyNameExists(name, client)) {
                    throw new RuntimeException("Topology with name `" + name + "` already exists on cluster");
                }

                // Dependency uploading only makes sense for distributed mode
                List<String> jarsBlobKeys = Collections.emptyList();
                List<String> artifactsBlobKeys;

                DependencyUploader uploader = new DependencyUploader();
                try {
                    uploader.init();

                    jarsBlobKeys = uploadDependencyJarsToBlobStore(uploader);

                    artifactsBlobKeys = uploadDependencyArtifactsToBlobStore(uploader);
                } catch (Throwable e) {
                    // remove uploaded jars blobs, not artifacts since they're shared across the cluster
                    uploader.deleteBlobs(jarsBlobKeys);
                    uploader.shutdown();
                    throw e;
                }

                try {
                    setDependencyBlobsToTopology(topology, jarsBlobKeys, artifactsBlobKeys);
                    submitTopologyInDistributeMode(name, topology, opts, progressListener, asUser, conf,
                            serConf, client);
                } catch (AlreadyAliveException | InvalidTopologyException | AuthorizationException e) {
                    // remove uploaded jars blobs, not artifacts since they're shared across the cluster
                    // Note that we don't handle TException to delete jars blobs
                    // because it's safer to leave some blobs instead of topology not running
                    uploader.deleteBlobs(jarsBlobKeys);
                    throw e;
                } finally {
                    uploader.shutdown();
                }
            }
        }
    } catch (TException e) {
        throw new RuntimeException(e);
    }
    invokeSubmitterHook(name, asUser, conf, topology);

}

From source file:org.apache.storm.submit.command.DependencyResolverMain.java

/**
 * Main entry of dependency resolver.
 *
 * @param args console arguments
 * @throws ParseException If there's parsing error on option parse.
 * @throws MalformedURLException If proxy URL is malformed.
 */
public static void main(String[] args) throws ParseException, MalformedURLException {
    Options options = buildOptions();
    CommandLineParser parser = new DefaultParser();
    CommandLine commandLine = parser.parse(options, args);

    if (!commandLine.hasOption(OPTION_ARTIFACTS_LONG)) {
        throw new IllegalArgumentException("artifacts must be presented.");
    }

    String artifactsArg = commandLine.getOptionValue(OPTION_ARTIFACTS_LONG);

    // DO NOT CHANGE THIS TO SYSOUT
    System.err.println("DependencyResolver input - artifacts: " + artifactsArg);
    List<Dependency> dependencies = parseArtifactArgs(artifactsArg);

    List<RemoteRepository> repositories;
    if (commandLine.hasOption(OPTION_ARTIFACT_REPOSITORIES_LONG)) {
        String remoteRepositoryArg = commandLine.getOptionValue(OPTION_ARTIFACT_REPOSITORIES_LONG);

        // DO NOT CHANGE THIS TO SYSOUT
        System.err.println("DependencyResolver input - repositories: " + remoteRepositoryArg);

        repositories = parseRemoteRepositoryArgs(remoteRepositoryArg);
    } else {
        repositories = Collections.emptyList();
    }

    try {
        String localMavenRepoPath = getOrDefaultLocalMavenRepositoryPath("local-repo");

        // create root directory if not exist
        Files.createDirectories(new File(localMavenRepoPath).toPath());

        DependencyResolver resolver = new DependencyResolver(localMavenRepoPath, repositories);

        if (commandLine.hasOption(OPTION_PROXY_URL_LONG)) {
            String proxyUrl = commandLine.getOptionValue(OPTION_PROXY_URL_LONG);
            String proxyUsername = commandLine.getOptionValue(OPTION_PROXY_USERNAME_LONG);
            String proxyPassword = commandLine.getOptionValue(OPTION_PROXY_PASSWORD_LONG);

            resolver.setProxy(parseProxyArg(proxyUrl, proxyUsername, proxyPassword));
        }

        List<ArtifactResult> artifactResults = resolver.resolve(dependencies);

        Iterable<ArtifactResult> missingArtifacts = filterMissingArtifacts(artifactResults);
        if (missingArtifacts.iterator().hasNext()) {
            printMissingArtifactsToSysErr(missingArtifacts);
            throw new RuntimeException("Some artifacts are not resolved");
        }

        System.out.println(JSONValue.toJSONString(transformArtifactResultToArtifactToPaths(artifactResults)));
        System.out.flush();
    } catch (Throwable e) {
        throw new RuntimeException(e);
    }
}

From source file:org.apache.storm.task.TopologyContext.java

@Override
public String toJSONString() {
    Map<String, Object> obj = new HashMap<>();
    obj.put("task->component", this.getTaskToComponent());
    obj.put("taskid", this.getThisTaskId());
    obj.put("componentid", this.getThisComponentId());
    List<String> streamList = new ArrayList<>();
    streamList.addAll(this.getThisStreams());
    obj.put("streams", streamList);
    obj.put("stream->outputfields", this.getThisOutputFieldsForStreams());
    // Convert targets to a JSON serializable format
    Map<String, Map<String, Object>> stringTargets = new HashMap<>();
    for (Map.Entry<String, Map<String, Grouping>> entry : this.getThisTargets().entrySet()) {
        Map<String, Object> stringTargetMap = new HashMap<>();
        for (Map.Entry<String, Grouping> innerEntry : entry.getValue().entrySet()) {
            stringTargetMap.put(innerEntry.getKey(), groupingToJSONableMap(innerEntry.getValue()));
        }
        stringTargets.put(entry.getKey(), stringTargetMap);
    }
    obj.put("stream->target->grouping", stringTargets);
    // Convert sources to a JSON serializable format
    Map<String, Map<String, Object>> stringSources = new HashMap<>();
    for (Map.Entry<GlobalStreamId, Grouping> entry : this.getThisSources().entrySet()) {
        GlobalStreamId gid = entry.getKey();
        Map<String, Object> stringSourceMap = stringSources.get(gid.get_componentId());
        if (stringSourceMap == null) {
            stringSourceMap = new HashMap<>();
            stringSources.put(gid.get_componentId(), stringSourceMap);
        }
        stringSourceMap.put(gid.get_streamId(), groupingToJSONableMap(entry.getValue()));
    }
    obj.put("source->stream->grouping", stringSources);
    obj.put("source->stream->fields", this.getThisInputFields());
    return JSONValue.toJSONString(obj);
}

From source file:org.apache.storm.trident.drpc.ReturnResultsReducer.java

@Override
public void complete(ReturnResultsState state, TridentCollector collector) {
    // only one of the multireducers will receive the tuples
    if (state.returnInfo != null) {
        String result = JSONValue.toJSONString(state.results);
        Map retMap = null;
        try {
            retMap = (Map) JSONValue.parseWithException(state.returnInfo);
        } catch (ParseException e) {
            collector.reportError(e);
            return;
        }
        final String host = (String) retMap.get("host");
        final int port = ObjectReader.getInt(retMap.get("port"));
        String id = (String) retMap.get("id");
        DistributedRPCInvocations.Iface client;
        if (local) {
            client = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(host);
        } else {
            List server = new ArrayList() {
                {
                    add(host);
                    add(port);
                }
            };

            if (!_clients.containsKey(server)) {
                try {
                    _clients.put(server, new DRPCInvocationsClient(conf, host, port));
                } catch (TTransportException ex) {
                    throw new RuntimeException(ex);
                }
            }
            client = _clients.get(server);
        }

        try {
            client.result(id, result);
        } catch (AuthorizationException aze) {
            collector.reportError(aze);
        } catch (TException e) {
            collector.reportError(e);
        }
    }
}
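
This reducer both produces JSON (the results) and consumes it (the return info). A small standalone sketch of that round trip, using parseWithException so malformed input surfaces as a checked ParseException (the host, port, and id values are made up):

import java.util.HashMap;
import java.util.Map;

import org.json.simple.JSONValue;
import org.json.simple.parser.ParseException;

public class ReturnInfoRoundTrip {
    public static void main(String[] args) {
        Map<String, Object> returnInfo = new HashMap<>();
        returnInfo.put("host", "drpc-host-1");
        returnInfo.put("port", 3772);
        returnInfo.put("id", "request-1");
        String encoded = JSONValue.toJSONString(returnInfo);

        try {
            Map decoded = (Map) JSONValue.parseWithException(encoded);
            System.out.println(decoded.get("host") + ":" + decoded.get("port"));
        } catch (ParseException e) {
            // reached only if the string is not valid JSON
            e.printStackTrace();
        }
    }
}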

From source file:org.apache.storm.util.CoreUtil.java

@SuppressWarnings("rawtypes")
@ClojureClass(className = "backtype.storm.util#to-json")
public static String to_json(Map m) {
    if (m == null) {
        return null;
    } else {
        return JSONValue.toJSONString(m);
    }
}
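
For contrast, note what this wrapper changes: passing null to the library directly does not yield a Java null reference but the JSON literal rendered as a string (json-simple 1.x behavior):

import org.json.simple.JSONValue;

public class NullJsonCheck {
    public static void main(String[] args) {
        // the to_json wrapper above returns null for a null map, whereas
        // the library itself returns the four-character string "null"
        System.out.println(JSONValue.toJSONString(null)); // prints: null
    }
}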

From source file:org.apache.storm.utils.Utils.java

public static boolean isValidConf(Map<String, Object> topoConf) {
    return normalizeConf(topoConf)
            .equals(normalizeConf((Map<String, Object>) JSONValue.parse(JSONValue.toJSONString(topoConf))));
}