Example usage for com.google.common.collect Lists newArrayList

List of usage examples for com.google.common.collect Lists newArrayList

Introduction

On this page you can find example usage for com.google.common.collect Lists newArrayList.

Prototype

@GwtCompatible(serializable = true)
public static <E> ArrayList<E> newArrayList(Iterator<? extends E> elements) 

Source Link

Document

Creates a mutable ArrayList instance containing the given elements; a very thin shortcut for creating an empty list and then calling Iterators#addAll.
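
A minimal, self-contained sketch (not taken from any of the files below; the class name is purely illustrative) of the equivalence described above:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;

public class NewArrayListFromIteratorDemo {
    public static void main(String[] args) {
        // Any Iterator<? extends E> works as the source.
        Iterator<String> elements = Lists.newArrayList("a", "b", "c").iterator();

        // One call copies the remaining elements into a fresh, mutable ArrayList.
        ArrayList<String> copy = Lists.newArrayList(elements);

        // The long-hand equivalent: create an empty list, then Iterators#addAll.
        Iterator<String> again = Lists.newArrayList("a", "b", "c").iterator();
        List<String> longhand = new ArrayList<>();
        Iterators.addAll(longhand, again);

        System.out.println(copy.equals(longhand)); // true
        copy.add("d");                             // mutable, as documented
    }
}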

Usage

From source file:brooklyn.demo.HighAvailabilityCassandraCluster.java

public static void main(String[] argv) {
    // Copy argv into a mutable list so the --port/--location options can be read off it.
    List<String> args = Lists.newArrayList(argv);
    String port = CommandLineUtil.getCommandLineOption(args, "--port", "8081+");
    String location = CommandLineUtil.getCommandLineOption(args, "--location", DEFAULT_LOCATION_SPEC);

    BrooklynLauncher launcher = BrooklynLauncher.newInstance()
            .application(EntitySpec.create(StartableApplication.class, HighAvailabilityCassandraCluster.class)
                    .displayName("Cassandra"))
            .webconsolePort(port).location(location).start();

    Entities.dumpInfo(launcher.getApplications());
}

From source file:com.eucalyptus.util.GenerateJiBXBinding.java

public static void main(String[] args) throws Exception {
    // Collect the compiled message class file names as a mutable list, plus a list for the loaded classes.
    List<String> pathList = Lists.newArrayList(new File("build/edu/ucsb/eucalyptus/msgs/").list());
    List<Class> classList = Lists.newArrayList();
    for (String className : pathList) {
        if (className.startsWith("JiBX_") || className.endsWith("Category"))
            continue;
        classList.add(Class.forName("edu.ucsb.eucalyptus.msgs." + className.replaceAll(".class", "")));
    }
    GenerateJiBXBinding.binding("http://msgs.eucalyptus.ucsb.edu", classList);
    File out = new File("src/main/resources/msgs-binding.xml");
    out.delete();
    PrintWriter os = new PrintWriter(out);
    os.write(bindingFile);
    os.flush();
    os.close();
}

From source file:tv.icntv.grade.film.recommend.CFRecommendJob.java

public static void main(String[] args) throws Exception {
    final Configuration configuration = HBaseConfiguration.create();
    configuration.addResource("grade.xml");
    String baseCfData = String.format(configuration.get("hdfs.directory.base.score"), new Date());
    String output = String.format(configuration.get("icntv.cf.recommend.directory.target"), new Date());
    String temp = String.format(configuration.get("icntv.cf.recommend.directory.temp"), new Date());
    StringBuilder sb = new StringBuilder();
    sb.append("--input ").append(baseCfData);
    sb.append(" --output ").append(output);
    sb.append(" --numRecommendations ").append(configuration.get("icntv.cf.recommend.num"));
    sb.append(" --similarityClassname ").append(configuration.get("icntv.cf.recommend.similarityClassname"));
    sb.append(" --tempDir ").append(temp);

    String tables = configuration.get("hbase.cdn.tables");

    if (Strings.isNullOrEmpty(tables)) {
        return;
    }
    List<String> list = Lists.newArrayList(Splitter.on(",").split(tables));
    List<String> results = Lists.transform(list, new Function<String, String>() {
        @Override
        public String apply(@Nullable java.lang.String input) {
            return String.format(configuration.get("hdfs.directory.base.db"), new Date(), input);
        }
    });

    int i = ToolRunner.run(configuration, new CFRecommendJob(),
            new String[] { Joiner.on(",").join(results), baseCfData, sb.toString(), output, temp });
    System.exit(i);
}

From source file:io.mindmaps.migration.sql.Main.java

public static void main(String[] args) {

    String jdbcDriver = null;
    String jdbcDBUrl = null;
    String jdbcUser = null;
    String jdbcPass = null;
    String engineURL = null;
    String graphName = null;

    for (int i = 0; i < args.length; i++) {
        if ("-driver".equals(args[i]))
            jdbcDriver = args[++i];
        else if ("-database".equals(args[i]))
            jdbcDBUrl = args[++i];
        else if ("-user".equals(args[i]))
            jdbcUser = args[++i];
        else if ("-password".equals(args[i]))
            jdbcPass = args[++i];
        else if ("-graph".equals(args[i]))
            graphName = args[++i];
        else if ("-engine".equals(args[i]))
            engineURL = args[++i];
        else if ("sql".equals(args[0]))
            continue;
        else
            die("Unknown option " + args[i]);
    }

    if (jdbcDriver == null)
        die("Please specify the JDBC driver on the classpath using the -driver option");
    if (jdbcDBUrl == null)
        die("Please specify the URL where the SQL db is running using the -database option");
    if (jdbcUser == null)
        die("Please specify the username of the database using the -user option");
    if (jdbcPass == null)
        die("Please specify the password of the database using the -password option");
    if (graphName == null) {
        die("Please specify the name of the graph using the -graph option");
    }

    System.out.println("Migrating " + jdbcDBUrl + " using MM Engine "
            + (engineURL == null ? "local" : engineURL) + " into graph " + graphName);

    // perform migration
    SQLSchemaMigrator schemaMigrator = new SQLSchemaMigrator();
    SQLDataMigrator dataMigrator = new SQLDataMigrator();

    try {

        MindmapsGraph graph = engineURL == null ? MindmapsClient.getGraph(graphName)
                : MindmapsClient.getGraph(graphName, engineURL);

        Loader loader = engineURL == null ? new BlockingLoader(graphName)
                : new DistributedLoader(graphName, Lists.newArrayList(engineURL));

        // make JDBC connection
        Class.forName(jdbcDriver).newInstance();
        Connection connection = DriverManager.getConnection(jdbcDBUrl, jdbcUser, jdbcPass);

        schemaMigrator.graph(graph).configure(connection).migrate(loader).close();

        System.out.println("Schema migration successful");

        dataMigrator.graph(graph).configure(connection).migrate(loader).close();

        System.out.println("DataType migration successful");

    } catch (Throwable throwable) {
        throwable.printStackTrace(System.err);
    }

    System.exit(0);
}

From source file:com.anhth12.test.Main.java

public static void main(String[] args) {

    SparkConf conf = new SparkConf();
    conf.setMaster("spark://192.168.56.101:7077");
    conf.setAppName("TEST");

    conf.setIfMissing("spark.executor.instance", Integer.toString(1));
    conf.setIfMissing("spark.executor.core", Integer.toString(1));
    conf.setIfMissing("spark.executor.memory", "512m");
    conf.setIfMissing("spark.driver.memory", "512m");

    String blockIntervalString = Long.toString(1000L);
    conf.setIfMissing("spark.streaming.blockInterval", blockIntervalString);
    conf.setIfMissing("spark.streaming.gracefulStopTimeout", blockIntervalString);
    conf.setIfMissing("spark.clean.ttl", Integer.toString(20 * 3000));
    conf.setIfMissing("spark.logConf", "true");
    conf.setIfMissing("spark.ui.port", Integer.toString(4040));

    try {
        conf.setJars(new String[] {
                Main.class.getProtectionDomain().getCodeSource().getLocation().toURI().getPath() });
    } catch (Exception e) {
        throw new IllegalStateException(e);
    }

    JavaStreamingContext streamingContext = new JavaStreamingContext(new JavaSparkContext(conf),
            new Duration(100));

    Map<String, String> kafkaParams = new HashMap<>();
    kafkaParams.put("zookeeper.connect", "192.168.56.101:2181");
    kafkaParams.put("group.id", "LAMBDA-BATCHLAYER-" + System.currentTimeMillis());
    kafkaParams.put("serializer.encoding", "UTF-8");
    //        kafkaParams.put(null, null)

    Map<String, Integer> topicMap = Maps.newHashMap();
    topicMap.put("LambdaInput", 1);

    JavaPairDStream<String, String> dstream = KafkaUtils.createStream(streamingContext, "192.168.56.101:2181",
            "GROUP", topicMap);
    //        JavaPairDStream<String, String> dstream = KafkaUtils.createStream(streamingContext,
    //                String.class,
    //                String.class,
    //                StringDecoder.class,
    //                StringDecoder.class,
    //                kafkaParams, topicMap,
    //                StorageLevel.MEMORY_AND_DISK_2());

    JavaDStream<String> lines = dstream.map(new Function<Tuple2<String, String>, String>() {
        @Override
        public String call(Tuple2<String, String> tuple2) {
            System.out.println("message: " + tuple2._2());
            return tuple2._2();
        }
    });

    JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
        @Override
        public Iterable<String> call(String x) {
            System.out.println("x: " + x);
            return Lists.newArrayList(x.split(","));
        }
    });

    JavaPairDStream<String, Integer> wordCounts = words.mapToPair(new PairFunction<String, String, Integer>() {
        @Override
        public Tuple2<String, Integer> call(String s) {
            return new Tuple2<String, Integer>(s, 1);
        }
    }).reduceByKey(new Function2<Integer, Integer, Integer>() {
        @Override
        public Integer call(Integer i1, Integer i2) {
            return i1 + i2;
        }
    });

    wordCounts.print();

    streamingContext.start();
    streamingContext.awaitTermination();

}

From source file:tv.icntv.cms.response.SearchMsg.java

public static void main(String[] args) {
    Search search = Search.getInstance();
    TransportClient client = search.getESClient();
    BoolFilterBuilder bFilter = FilterBuilders.boolFilter().must(FilterBuilders.termFilter("region_code", "a"))
            .must(FilterBuilders.termFilter("platform_code", "sanxing"))
            //                .must(FilterBuilders.boolFilter().must(FilterBuilders.termFilter("tag_arr.primary_tag.tag_name", "")))
            .must(FilterBuilders.boolFilter().must(FilterBuilders.prefixFilter("program_series_header", "xp")));
    SearchResponse response = client.prepareSearch("cms_v1").setTypes("item").setPostFilter(bFilter)
            .addAggregation(AggregationBuilders.terms("first").field("tag_arr.primary_tag.tag_name")
                    .subAggregation(AggregationBuilders.terms("secondary")
                            .field("tag_arr.primary_tag.secondary_tag.tag_name")))
            .setFrom(0).setSize(1).execute().actionGet();
    //        System.out.println(response.toString());
    SearchHits hits = response.getHits();
    System.out.println(hits.getTotalHits());
    Response response1 = new Response("success", 0);
    Msg msg = new Msg(2, 2, 3);
    for (SearchHit hit : hits) {
        System.out.println(hit.sourceAsString());
        ProgramSeries items = JSON.parseObject(hit.source(), ProgramSeries.class);
        // System.out.println(items.toString());
        // System.out.println(items.getProgram_series_name() + " \t" + items.getArtist().get(0).getArtist_id());
        // Map<String, Object> maps = hit.sourceAsMap();
        // Set<String> keys = maps.keySet();
        // for (String str : keys) {
        //     System.out.println(str + "\t" + maps.get(str));
        // }
        msg.setList(Lists.newArrayList(items));
        Terms first = response.getAggregations().get("first");
        List<Tag> tags = Lists.newArrayList();
        for (Terms.Bucket bucket : first.getBuckets()) {
            Tag primary = new Tag();
            primary.setCount(bucket.getDocCount());
            primary.setName(bucket.getKey());
            Terms secondary = bucket.getAggregations().get("secondary");
            List<Tag> secondaries = Lists.newArrayList();
            for (Terms.Bucket b : secondary.getBuckets()) {
                Tag secondary_tag = new Tag();
                secondary_tag.setName(b.getKey());
                secondary_tag.setCount(b.getDocCount());
                secondaries.add(secondary_tag);
            }
            primary.setSecondary_tag(secondaries);
            tags.add(primary);
        }
        msg.setPrimary_tag(tags);
    }
    response1.setData(msg);
    System.out.println(JSON.toJSONString(response1));
}

From source file:com.mapr.PurchaseLog.java

public static void main(String[] args) throws IOException {
    Options opts = new Options();
    CmdLineParser parser = new CmdLineParser(opts);
    try {
        parser.parseArgument(args);
    } catch (CmdLineException e) {
        System.err.println("Usage: -count <number>G|M|K [ -users number ]  log-file user-profiles");
        return;
    }

    Joiner withTab = Joiner.on("\t");

    // first generate lots of user definitions
    SchemaSampler users = new SchemaSampler(
            Resources.asCharSource(Resources.getResource("user-schema.txt"), Charsets.UTF_8).read());
    File userFile = File.createTempFile("user", "tsv");
    BufferedWriter out = Files.newBufferedWriter(userFile.toPath(), Charsets.UTF_8);
    for (int i = 0; i < opts.users; i++) {
        out.write(withTab.join(users.sample()));
        out.newLine();
    }
    out.close();

    // now generate a session for each user
    Splitter onTabs = Splitter.on("\t");
    Splitter onComma = Splitter.on(",");

    Random gen = new Random();
    SchemaSampler intermediate = new SchemaSampler(
            Resources.asCharSource(Resources.getResource("hit_step.txt"), Charsets.UTF_8).read());

    final int COUNTRY = users.getFieldNames().indexOf("country");
    final int CAMPAIGN = intermediate.getFieldNames().indexOf("campaign_list");
    final int SEARCH_TERMS = intermediate.getFieldNames().indexOf("search_keywords");
    Preconditions.checkState(COUNTRY >= 0, "Need country field in user schema");
    Preconditions.checkState(CAMPAIGN >= 0, "Need campaign_list field in step schema");
    Preconditions.checkState(SEARCH_TERMS >= 0, "Need search_keywords field in step schema");

    out = Files.newBufferedWriter(new File(opts.out).toPath(), Charsets.UTF_8);

    for (String line : Files.readAllLines(userFile.toPath(), Charsets.UTF_8)) {
        long t = (long) (TimeUnit.MILLISECONDS.convert(30, TimeUnit.DAYS) * gen.nextDouble());
        List<String> user = Lists.newArrayList(onTabs.split(line));

        // pick session length
        int n = (int) Math.floor(-30 * Math.log(gen.nextDouble()));

        for (int i = 0; i < n; i++) {
            // time on page
            int dt = (int) Math.floor(-20000 * Math.log(gen.nextDouble()));
            t += dt;

            // hit specific values
            JsonNode step = intermediate.sample();

            // check for purchase
            double p = 0.01;
            List<String> campaigns = Lists.newArrayList(onComma.split(step.get("campaign_list").asText()));
            List<String> keywords = Lists.newArrayList(onComma.split(step.get("search_keywords").asText()));
            if ((user.get(COUNTRY).equals("us") && campaigns.contains("5"))
                    || (user.get(COUNTRY).equals("jp") && campaigns.contains("7")) || keywords.contains("homer")
                    || keywords.contains("simpson")) {
                p = 0.5;
            }

            String events = gen.nextDouble() < p ? "1" : "-";

            out.write(Long.toString(t));
            out.write("\t");
            out.write(line);
            out.write("\t");
            out.write(withTab.join(step));
            out.write("\t");
            out.write(events);
            out.write("\n");
        }
    }
    out.close();
}

From source file:org.apache.spark.examples.streaming.JavaStatefulNetworkWordCount.java

public static void main(String[] args) {
    if (args.length < 2) {
        System.err.println("Usage: JavaStatefulNetworkWordCount <hostname> <port>");
        System.exit(1);
    }

    StreamingExamples.setStreamingLogLevels();

    // Update the cumulative count function
    final Function2<List<Integer>, Optional<Integer>, Optional<Integer>> updateFunction = new Function2<List<Integer>, Optional<Integer>, Optional<Integer>>() {
        @Override
        public Optional<Integer> call(List<Integer> values, Optional<Integer> state) {
            Integer newSum = state.or(0);
            for (Integer value : values) {
                newSum += value;
            }
            return Optional.of(newSum);
        }
    };

    // Create the context with a 1 second batch size
    SparkConf sparkConf = new SparkConf().setAppName("JavaStatefulNetworkWordCount");
    JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(1));
    ssc.checkpoint(".");

    // Initial RDD input to updateStateByKey
    List<Tuple2<String, Integer>> tuples = Arrays.asList(new Tuple2<String, Integer>("hello", 1),
            new Tuple2<String, Integer>("world", 1));
    JavaPairRDD<String, Integer> initialRDD = ssc.sc().parallelizePairs(tuples);

    JavaReceiverInputDStream<String> lines = ssc.socketTextStream(args[0], Integer.parseInt(args[1]),
            StorageLevels.MEMORY_AND_DISK_SER_2);

    JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
        @Override
        public Iterable<String> call(String x) {
            return Lists.newArrayList(SPACE.split(x));
        }
    });

    JavaPairDStream<String, Integer> wordsDstream = words
            .mapToPair(new PairFunction<String, String, Integer>() {
                @Override
                public Tuple2<String, Integer> call(String s) {
                    return new Tuple2<String, Integer>(s, 1);
                }
            });

    // This will give a Dstream made of state (which is the cumulative count of the words)
    JavaPairDStream<String, Integer> stateDstream = wordsDstream.updateStateByKey(updateFunction,
            new HashPartitioner(ssc.sc().defaultParallelism()), initialRDD);

    stateDstream.print();
    ssc.start();
    ssc.awaitTermination();
}

From source file:org.apache.brooklyn.demo.WideAreaCassandraCluster.java

public static void main(String[] argv) {
    List<String> args = Lists.newArrayList(argv);
    String port = CommandLineUtil.getCommandLineOption(args, "--port", "8081+");
    String locations = CommandLineUtil.getCommandLineOption(args, "--location", DEFAULT_LOCATION_SPEC);

    BrooklynLauncher launcher = BrooklynLauncher.newInstance().application(EntitySpec
            .create(StartableApplication.class, WideAreaCassandraCluster.class).displayName("Cassandra"))
            .webconsolePort(port).locations(Arrays.asList(locations)).start();

    Entities.dumpInfo(launcher.getApplications());
}

From source file:com.naltel.spark.JavaStatefulNetworkWordCount.java

public static void main(String[] args) {
    if (args.length < 2) {
        System.err.println("Usage: JavaStatefulNetworkWordCount <hostname> <port>");
        System.exit(1);
    }

    StreamingExamples.setStreamingLogLevels();

    // Update the cumulative count function
    final Function2<List<Integer>, Optional<Integer>, Optional<Integer>> updateFunction = new Function2<List<Integer>, Optional<Integer>, Optional<Integer>>() {
        @Override
        public Optional<Integer> call(List<Integer> values, Optional<Integer> state) {
            Integer newSum = state.or(0);
            for (Integer value : values) {
                newSum += value;
            }
            return Optional.of(newSum);
        }
    };

    // Create the context with a 1 second batch size
    SparkConf sparkConf = new SparkConf().setAppName("JavaStatefulNetworkWordCount");
    JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(1));
    ssc.checkpoint(".");

    // Initial RDD input to updateStateByKey
    @SuppressWarnings("unchecked")
    List<Tuple2<String, Integer>> tuples = Arrays.asList(new Tuple2<String, Integer>("hello", 1),
            new Tuple2<String, Integer>("world", 1));
    JavaPairRDD<String, Integer> initialRDD = ssc.sc().parallelizePairs(tuples);

    JavaReceiverInputDStream<String> lines = ssc.socketTextStream(args[0], Integer.parseInt(args[1]),
            StorageLevels.MEMORY_AND_DISK_SER_2);

    JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
        @Override
        public Iterable<String> call(String x) {
            return Lists.newArrayList(SPACE.split(x));
        }
    });

    @SuppressWarnings("serial")
    JavaPairDStream<String, Integer> wordsDstream = words
            .mapToPair(new PairFunction<String, String, Integer>() {
                @Override
                public Tuple2<String, Integer> call(String s) {
                    return new Tuple2<String, Integer>(s, 1);
                }
            });

    // This will give a Dstream made of state (which is the cumulative count of the words)
    // JavaPairDStream<String, Integer> stateDstream = wordsDstream.updateStateByKey(updateFunction,
    //         new HashPartitioner(ssc.sc().defaultParallelism()), initialRDD);

    //stateDstream.print();
    ssc.start();
    ssc.awaitTermination();
}