Usage examples for com.google.common.collect.Lists.newArrayList
@GwtCompatible(serializable = true) public static <E> ArrayList<E> newArrayList(Iterator<? extends E> elements)
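This overload copies the elements delivered by an Iterator into a new mutable ArrayList; Guava also provides newArrayList(E...) and newArrayList(Iterable<? extends E>) overloads, which several of the examples below use. Before the collected examples, here is a minimal, self-contained sketch of the Iterator overload. The class name and the sample strings are illustrative only and do not come from any of the source files below.

import com.google.common.collect.Lists;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;

public class NewArrayListExample {
    public static void main(String[] args) {
        // Any Iterator works; this one is built over a fixed list of sample strings.
        Iterator<String> elements = Arrays.asList("alpha", "beta", "gamma").iterator();

        // Drains the iterator into a new, mutable ArrayList.
        ArrayList<String> copy = Lists.newArrayList(elements);

        copy.add("delta"); // the returned list is mutable
        System.out.println(copy); // prints [alpha, beta, gamma, delta]
    }
}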
From source file:com.technostar98.tcbot.Launcher.java
public static void main(String[] args) {
    try {
        Stats.getStat("startTime").getValue(); // Make sure <clinit> is called in Stats before anything else

        StringJoiner joiner = new StringJoiner(" ");
        for (String arg : args)
            joiner.add(arg);
        ArgumentParser.init(joiner.toString());
        ArgumentParser argLoader = ArgumentParser.INSTANCE();
        argLoader.registerOptionalArgument("debug",
                ArgumentParser.EQUALS | ArgumentParser.COLON | ArgumentParser.SPACED);
        argLoader.registerOptionalArgument("maxScriptThreads",
                ArgumentParser.COLLECTION_MAP | ArgumentParser.COLON | ArgumentParser.EQUALS);
        argLoader.registerOptionalArgument("noGui",
                ArgumentParser.EQUALS | ArgumentParser.COLON | ArgumentParser.SPACED);
        // EXAMPLE CODE
        /*argLoader.registerOptionalArgument("test1", ArgumentParser.EQUALS | ArgumentParser.COLON);
        argLoader.registerOptionalArgument("test2", ArgumentParser.COLLECTION_LIST);
        argLoader.registerOptionalArgument("test3", ArgumentParser.COLLECTION_MAP);*/
        argLoader.processArguments();

        ConfigFile config = new ConfigFile(
                Configs.getStringConfiguration("configDir").getValue() + "configs.json");
        config.readFileContents();
        if (!config.isInitialized()) {
            config.setContents(Configs.configurations);
            config.saveFileContents();
        } else {
            Configs.setStartupConfigurations((HashMap) config.getMappedContents());
        }

        // Load up the API from the internal version
        CommandManager.commandManager = Optional.of((ICommandFilterRegistry) CommandPool.INSTANCE);

        ServerConfigFile serverConfigFile = new ServerConfigFile();
        serverConfigFile.readFileContents();
        if (serverConfigFile.isInitialized()) {
            Map<String, ServerConfiguration> serverConfigs = serverConfigFile.getMappedContents();
            for (String s : serverConfigs.keySet()) {
                BotManager.createNewBot(serverConfigs.get(s));
            }
        } else {
            // Backup/default server configuration file
            ServerConfiguration esper = new ServerConfiguration("Technostarhosting",
                    "irc.technostarhosting.com", "TCBot", "TEMP",
                    Lists.newArrayList("Horfius"), "#TCBot");
            serverConfigFile.addField(esper.getServerName(), esper);
            serverConfigFile.saveFileContents();
            BotManager.createNewBot(esper);
        }

        BotManager.start(); // Launch bots
        if (Boolean.valueOf((String) argLoader.getArgumentValue("debug"))) {
            BotManager.startDebugMonitor(); // TODO actual debug monitor
        }
    } catch (Exception e) {
        e.printStackTrace();
        CrashLogBuilder b = new CrashLogBuilder(e);
        b.buildLog();
        System.exit(-1);
    }
}
From source file:io.mindmaps.migration.csv.Main.java
public static void main(String[] args) {
    String csvFileName = null;
    String csvEntityType = null;
    String engineURL = null;
    String graphName = null;

    for (int i = 0; i < args.length; i++) {
        if ("-file".equals(args[i]))
            csvFileName = args[++i];
        else if ("-graph".equals(args[i]))
            graphName = args[++i];
        else if ("-engine".equals(args[i]))
            engineURL = args[++i];
        else if ("-as".equals(args[i])) {
            csvEntityType = args[++i];
        } else if ("csv".equals(args[0])) {
            continue;
        } else
            die("Unknown option " + args[i]);
    }

    if (csvFileName == null) {
        die("Please specify the CSV file using the -file option");
    }
    File csvFile = new File(csvFileName);
    if (!csvFile.exists()) {
        die("Cannot find file: " + csvFileName);
    }
    if (graphName == null) {
        die("Please provide the name of the graph using -graph");
    }
    if (csvEntityType == null) {
        csvEntityType = csvFile.getName().replaceAll("[^A-Za-z0-9]", "_");
    }

    System.out.println("Migrating " + csvFileName + " using MM Engine "
            + (engineURL == null ? "local" : engineURL) + " into graph " + graphName);

    // perform migration
    CSVSchemaMigrator schemaMigrator = new CSVSchemaMigrator();
    CSVDataMigrator dataMigrator = new CSVDataMigrator();

    try {
        MindmapsGraph graph = engineURL == null ? MindmapsClient.getGraph(graphName)
                : MindmapsClient.getGraph(graphName, engineURL);
        Loader loader = engineURL == null ? new BlockingLoader(graphName)
                : new DistributedLoader(graphName, Lists.newArrayList(engineURL));

        CSVParser csvParser = CSVParser.parse(csvFile.toURI().toURL(), StandardCharsets.UTF_8,
                CSVFormat.DEFAULT.withHeader());

        schemaMigrator.graph(graph).configure(csvEntityType, csvParser).migrate(loader);
        System.out.println("Schema migration successful");

        dataMigrator.graph(graph).configure(csvEntityType, csvParser).migrate(loader);
        System.out.println("DataType migration successful");
    } catch (Throwable throwable) {
        throwable.printStackTrace(System.err);
    }

    System.exit(0);
}
From source file:org.apache.spark.streaming.examples.JavaKafkaWordCount.java
public static void main(String[] args) {
    if (args.length < 5) {
        System.err.println("Usage: KafkaWordCount <master> <zkQuorum> <group> <topics> <numThreads>");
        System.exit(1);
    }

    StreamingExamples.setStreamingLogLevels();

    // Create the context with a 2 second batch size
    JavaStreamingContext jssc = new JavaStreamingContext(args[0], "KafkaWordCount", new Duration(2000),
            System.getenv("SPARK_HOME"), JavaStreamingContext.jarOfClass(JavaKafkaWordCount.class));

    int numThreads = Integer.parseInt(args[4]);
    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    String[] topics = args[3].split(",");
    for (String topic : topics) {
        topicMap.put(topic, numThreads);
    }

    JavaPairDStream<String, String> messages = KafkaUtils.createStream(jssc, args[1], args[2], topicMap);

    JavaDStream<String> lines = messages.map(new Function<Tuple2<String, String>, String>() {
        @Override
        public String call(Tuple2<String, String> tuple2) {
            return tuple2._2();
        }
    });

    JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
        @Override
        public Iterable<String> call(String x) {
            return Lists.newArrayList(SPACE.split(x));
        }
    });

    JavaPairDStream<String, Integer> wordCounts = words.mapToPair(new PairFunction<String, String, Integer>() {
        @Override
        public Tuple2<String, Integer> call(String s) {
            return new Tuple2<String, Integer>(s, 1);
        }
    }).reduceByKey(new Function2<Integer, Integer, Integer>() {
        @Override
        public Integer call(Integer i1, Integer i2) {
            return i1 + i2;
        }
    });

    wordCounts.print();
    jssc.start();
    jssc.awaitTermination();
}
From source file:efinance.examples.streaming.JavaNetworkWordCount.java
public static void main(String[] args) {
    if (args.length < 2) {
        System.err.println("Usage: JavaNetworkWordCount <hostname> <port>");
        System.exit(1);
    }

    // StreamingExamples.setStreamingLogLevels();
    Logger.getLogger("org").setLevel(Level.OFF);

    // Create the context with a 1 second batch size
    SparkConf sparkConf = new SparkConf().setAppName("JavaNetworkWordCount");
    JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(1));

    // System.setProperty("http.proxyHost", "proxy.reply.it");
    // System.setProperty("http.proxyPort", "8080");
    // System.setProperty("https.proxyHost", "proxy.reply.it");
    // System.setProperty("https.proxyPort", "8080");

    // Create a JavaReceiverInputDStream on the target ip:port and count the
    // words in the input stream of \n-delimited text (e.g. generated by 'nc').
    // A storage level without replication is sufficient when running locally;
    // replication is necessary in a distributed scenario for fault tolerance.
    JavaReceiverInputDStream<String> lines = ssc.socketTextStream(args[0], Integer.parseInt(args[1]),
            StorageLevels.MEMORY_AND_DISK_SER);

    JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
        @Override
        public Iterable<String> call(String x) {
            return Lists.newArrayList(SPACE.split(x));
        }
    });

    JavaPairDStream<String, Integer> wordCounts = words.mapToPair(new PairFunction<String, String, Integer>() {
        @Override
        public Tuple2<String, Integer> call(String s) {
            return new Tuple2<String, Integer>(s, 1);
        }
    }).reduceByKey(new Function2<Integer, Integer, Integer>() {
        @Override
        public Integer call(Integer i1, Integer i2) {
            return i1 + i2;
        }
    });

    wordCounts.print();
    ssc.start();
    ssc.awaitTermination();
}
From source file:kafkaWordCount.JavaKafkaWordCount.java
public static void main(String[] args) {
    // if (args.length < 4) {
    //     System.err.println("Usage: JavaKafkaWordCount <zkQuorum> <group> <topics> <numThreads>");
    //     System.exit(1);
    // }
    String[] args1 = { "localhost:2181", "my-consumer-group", "top1,top2", "2" };

    SparkConf sparkConf = new SparkConf().setAppName("JavaKafkaWordCount").setMaster("local[2]");
    // Create the context with 2 seconds batch size
    JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(2000));

    int numThreads = Integer.parseInt(args1[3]);
    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    String[] topics = args1[2].split(",");
    for (String topic : topics) {
        topicMap.put(topic, numThreads);
        System.out.println("htq" + topic);
    }

    JavaPairReceiverInputDStream<String, String> messages = KafkaUtils.createStream(jssc, args1[0], args1[1],
            topicMap);

    JavaDStream<String> lines = messages.map(new Function<Tuple2<String, String>, String>() {
        public String call(Tuple2<String, String> tuple2) {
            return tuple2._2();
        }
    });

    JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
        public Iterable<String> call(String x) {
            return Lists.newArrayList(SPACE.split(x));
        }
    });

    JavaPairDStream<String, Integer> wordCounts = words.mapToPair(new PairFunction<String, String, Integer>() {
        public Tuple2<String, Integer> call(String s) {
            return new Tuple2<String, Integer>(s, 1);
        }
    }).reduceByKey(new Function2<Integer, Integer, Integer>() {
        public Integer call(Integer i1, Integer i2) {
            return i1 + i2;
        }
    });

    wordCounts.print();
    jssc.start();
    jssc.awaitTermination();
}
From source file:com.sdw.dream.spark.examples.streaming.JavaKafkaWordCount.java
public static void main(String[] args) {
    if (args.length < 4) {
        System.err.println("Usage: JavaKafkaWordCount <zkQuorum> <group> <topics> <numThreads>");
        System.exit(1);
    }

    StreamingExamples.setStreamingLogLevels();

    SparkConf sparkConf = new SparkConf().setAppName("JavaKafkaWordCount");
    // Create the context with 2 seconds batch size
    JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(2000));

    int numThreads = Integer.parseInt(args[3]);
    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    String[] topics = args[2].split(",");
    for (String topic : topics) {
        topicMap.put(topic, numThreads);
    }

    JavaPairReceiverInputDStream<String, String> messages = KafkaUtils.createStream(jssc, args[0], args[1],
            topicMap);

    JavaDStream<String> lines = messages.map(new Function<Tuple2<String, String>, String>() {
        @Override
        public String call(Tuple2<String, String> tuple2) {
            return tuple2._2();
        }
    });

    JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
        @Override
        public Iterable<String> call(String x) {
            return Lists.newArrayList(SPACE.split(x));
        }
    });

    JavaPairDStream<String, Integer> wordCounts = words.mapToPair(new PairFunction<String, String, Integer>() {
        @Override
        public Tuple2<String, Integer> call(String s) {
            return new Tuple2<String, Integer>(s, 1);
        }
    }).reduceByKey(new Function2<Integer, Integer, Integer>() {
        @Override
        public Integer call(Integer i1, Integer i2) {
            return i1 + i2;
        }
    });

    wordCounts.print();
    jssc.start();
    jssc.awaitTermination();
}
From source file:com.google.api.services.samples.youtube.cmdline.data.Captions.java
/**
 * Upload, list, update, download, and delete caption tracks.
 *
 * @param args command line args (not used).
 */
public static void main(String[] args) {
    // This OAuth 2.0 access scope allows for full read/write access to the
    // authenticated user's account and requires requests to use an SSL connection.
    List<String> scopes = Lists.newArrayList("https://www.googleapis.com/auth/youtube.force-ssl");

    try {
        // Authorize the request.
        Credential credential = Auth.authorize(scopes, "captions");

        // This object is used to make YouTube Data API requests.
        youtube = new YouTube.Builder(Auth.HTTP_TRANSPORT, Auth.JSON_FACTORY, credential)
                .setApplicationName("youtube-cmdline-captions-sample").build();

        // Prompt the user to specify the action to be performed.
        String actionString = getActionFromUser();
        System.out.println("You chose " + actionString + ".");
        Action action = Action.valueOf(actionString.toUpperCase());

        switch (action) {
        case UPLOAD:
            uploadCaption(getVideoId(), getLanguage(), getName(), getCaptionFromUser());
            break;
        case LIST:
            listCaptions(getVideoId());
            break;
        case UPDATE:
            updateCaption(getCaptionIDFromUser(), getUpdateCaptionFromUser());
            break;
        case DOWNLOAD:
            downloadCaption(getCaptionIDFromUser());
            break;
        case DELETE:
            deleteCaption(getCaptionIDFromUser());
            break;
        default:
            // All the available methods are used in sequence just for the sake of an example.
            // Prompt the user to specify a video to upload the caption track for, along with
            // a language, a name, and a binary file for the caption track. Then upload the
            // caption track with the values selected by the user.
            String videoId = getVideoId();
            uploadCaption(videoId, getLanguage(), getName(), getCaptionFromUser());
            List<Caption> captions = listCaptions(videoId);
            if (captions.isEmpty()) {
                System.out.println("Can't get video caption tracks.");
            } else {
                // Retrieve the first uploaded caption track.
                String firstCaptionId = captions.get(0).getId();
                updateCaption(firstCaptionId, null);
                downloadCaption(firstCaptionId);
                deleteCaption(firstCaptionId);
            }
        }
    } catch (GoogleJsonResponseException e) {
        System.err.println("GoogleJsonResponseException code: " + e.getDetails().getCode() + " : "
                + e.getDetails().getMessage());
        e.printStackTrace();
    } catch (IOException e) {
        System.err.println("IOException: " + e.getMessage());
        e.printStackTrace();
    } catch (Throwable t) {
        System.err.println("Throwable: " + t.getMessage());
        t.printStackTrace();
    }
}
From source file:org.apache.spark.examples.streaming.JavaKafkaWordCount.java
public static void main(String[] args) {
    if (args.length < 4) {
        System.err.println("Usage: JavaKafkaWordCount <zkQuorum> <group> <topics> <numThreads>");
        System.exit(1);
    }

    StreamingExamples.setStreamingLogLevels();

    SparkConf sparkConf = new SparkConf().setAppName("JavaKafkaWordCount");
    // Create the context with a 2 second batch size
    JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(2000));

    int numThreads = Integer.parseInt(args[3]);
    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    String[] topics = args[2].split(",");
    for (String topic : topics) {
        topicMap.put(topic, numThreads);
    }

    JavaPairReceiverInputDStream<String, String> messages = KafkaUtils.createStream(jssc, args[0], args[1],
            topicMap);

    JavaDStream<String> lines = messages.map(new Function<Tuple2<String, String>, String>() {
        @Override
        public String call(Tuple2<String, String> tuple2) {
            return tuple2._2();
        }
    });

    JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
        @Override
        public Iterable<String> call(String x) {
            return Lists.newArrayList(SPACE.split(x));
        }
    });

    JavaPairDStream<String, Integer> wordCounts = words.mapToPair(new PairFunction<String, String, Integer>() {
        @Override
        public Tuple2<String, Integer> call(String s) {
            return new Tuple2<String, Integer>(s, 1);
        }
    }).reduceByKey(new Function2<Integer, Integer, Integer>() {
        @Override
        public Integer call(Integer i1, Integer i2) {
            return i1 + i2;
        }
    });

    wordCounts.print();
    jssc.start();
    jssc.awaitTermination();
}
From source file:com.zq.exec.stream.JavaKafkaWordCount.java
public static void main(String[] args) {
    if (args.length < 4) {
        System.err.println("Usage: JavaKafkaWordCount <zkQuorum> <group> <topics> <numThreads>");
        System.exit(1);
    }

    SparkConf sparkConf = new SparkConf().setAppName("JavaKafkaWordCount");
    // Create the context with 2 seconds batch size
    JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(2000));

    int numThreads = Integer.parseInt(args[3]);
    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    String[] topics = args[2].split(",");
    for (String topic : topics) {
        topicMap.put(topic, numThreads);
    }

    JavaPairReceiverInputDStream<String, String> messages = KafkaUtils.createStream(jssc, args[0], args[1],
            topicMap);

    JavaDStream<String> lines = messages.map(new Function<Tuple2<String, String>, String>() {
        @Override
        public String call(Tuple2<String, String> tuple2) {
            return tuple2._2();
        }
    });

    JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
        @Override
        public Iterable<String> call(String x) {
            return Lists.newArrayList(SPACE.split(x));
        }
    });

    JavaPairDStream<String, Integer> wordCounts = words.mapToPair(new PairFunction<String, String, Integer>() {
        @Override
        public Tuple2<String, Integer> call(String s) {
            return new Tuple2<String, Integer>(s, 1);
        }
    }).reduceByKey(new Function2<Integer, Integer, Integer>() {
        @Override
        public Integer call(Integer i1, Integer i2) {
            return i1 + i2;
        }
    });

    wordCounts.map(new Function<Tuple2<String, Integer>, String>() {
        @Override
        public String call(Tuple2<String, Integer> v1) throws Exception {
            System.out.println("---" + v1._1 + "-count---" + v1._2);
            LOG.info("---" + v1._1 + "-count---" + v1._2);
            return v1._1 + "=count=" + v1._2;
        }
    }).count().print();
    // wordCounts.print();

    jssc.start();
    jssc.awaitTermination();
}
From source file:com.naltel.spark.JavaKafkaWordCount.java
public static void main(String[] args) {
    if (args.length < 4) {
        System.err.println("Usage: JavaKafkaWordCount <zkQuorum> <group> <topics> <numThreads>");
        System.exit(1);
    }

    // StreamingExamples.setStreamingLogLevels();

    SparkConf sparkConf = new SparkConf().setAppName("JavaKafkaWordCount")
            .set("spark.streaming.receiver.writeAheadLog.enable", "false");
    // Create the context with a 2 second batch size
    JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(2000));

    int numThreads = Integer.parseInt(args[3]);
    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    String[] topics = args[2].split(",");
    for (String topic : topics) {
        topicMap.put(topic, numThreads);
    }

    JavaPairReceiverInputDStream<String, String> messages = KafkaUtils.createStream(jssc, args[0], args[1],
            topicMap);

    JavaDStream<String> lines = messages.map(new Function<Tuple2<String, String>, String>() {
        @Override
        public String call(Tuple2<String, String> tuple2) {
            return tuple2._2();
        }
    });

    JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
        @Override
        public Iterable<String> call(String x) {
            return Lists.newArrayList(SPACE.split(x));
        }
    });

    JavaPairDStream<String, Integer> wordCounts = words.mapToPair(new PairFunction<String, String, Integer>() {
        @Override
        public Tuple2<String, Integer> call(String s) {
            return new Tuple2<String, Integer>(s, 1);
        }
    }).reduceByKey(new Function2<Integer, Integer, Integer>() {
        @Override
        public Integer call(Integer i1, Integer i2) {
            return i1 + i2;
        }
    });

    wordCounts.print();
    jssc.start();
    jssc.awaitTermination();
}