KafkaSparkMongo.java Source code


Introduction

Here is the source code for KafkaSparkMongo.java, a Spark Streaming job that consumes records from one or more Kafka topics, splits each record into seven space-separated fields (serial, zone, group, an X/Y/Z coordinate and a timestamp), and appends the result to a MongoDB collection through the MongoDB Spark Connector.

Source

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.time.Instant;
import java.util.*;
import java.util.regex.Pattern;

import com.mongodb.MongoClient;
import com.mongodb.MongoClientURI;
import org.apache.spark.streaming.api.java.*;

import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.types.*;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;

import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.kafka.KafkaUtils;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;

import com.mongodb.spark.MongoSpark;
import java.sql.Timestamp;
import scala.Tuple7;
import kafka.serializer.StringDecoder;

/**
 * Consumes UTF-8 encoded text records from Kafka, splits each record into seven space-separated
 * fields and stores the result in a MongoDB database.
 * <brokers> is a comma-separated list of Kafka brokers and <topics> is a comma-separated list of
 * Kafka topics to consume from.
 *
 * To run this on your local machine, start a Kafka broker and a MongoDB instance, then submit the example:
 *    `$ spark-submit --class KafkaSparkMongo PATH/KafkaSparkMongo.jar <brokers> <topics>`
 */
public final class KafkaSparkMongo {
    private static final Pattern SPACE = Pattern.compile(" ");

    public static void main(String[] args) throws Exception {
        if (args.length < 2) {
            System.err.println("Usage: KafkaSparkMongo <brokers> <topics>\n"
                    + "  <brokers> is a comma-separated list of one or more Kafka brokers\n"
                    + "  <topics> is a comma-separated list of one or more Kafka topics to consume from\n\n");
            System.exit(1);
        }

        String brokers = args[0];
        String topics = args[1];

        String uriMongo = "mongodb://localhost/streamSparkFinal.coll";
        dropDatabase(uriMongo);

        // Create the streaming context with a 5 second batch interval
        SparkConf sparkConf = new SparkConf().setAppName("KafkaSparkMongo")
                .set("spark.app.id", "MongoSparkConnectorTour").set("spark.mongodb.input.uri", uriMongo)
                .set("spark.mongodb.output.uri", uriMongo);

        JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(5));

        // Build the topic set from the comma-separated <topics> argument and point the consumer
        // at the given brokers; the direct stream below reads from Kafka without a receiver
        Set<String> topicsSet = new HashSet<>(Arrays.asList(topics.split(",")));
        Map<String, String> kafkaParams = new HashMap<>();
        kafkaParams.put("metadata.broker.list", brokers);

        // Create direct kafka stream with brokers and topics
        JavaPairInputDStream<String, String> messages = KafkaUtils.createDirectStream(ssc, String.class,
                String.class, StringDecoder.class, StringDecoder.class, kafkaParams, topicsSet);

        messages.print();

        // Keep only the message value; the Kafka message key (x._1()) is not used
        JavaDStream<String> lines = messages.map(x -> x._2());

        // Split each message into its seven space-separated fields:
        // Serial, Zone, Group, X, Y, Z and an ISO-8601 time string
        JavaDStream<Tuple7<String, String, String, String, String, String, String>> words = lines.map(y -> {
            String[] wordy = SPACE.split(y);
            return new Tuple7<>(wordy[0], wordy[1], wordy[2], wordy[3], wordy[4], wordy[5], wordy[6]);
        });

        words.foreachRDD(rdd -> {

            // Schema of the documents written to MongoDB: flat string fields, a nested coord
            // struct holding the X/Y/Z doubles, and a timestamp column
            List<StructField> subFields = new ArrayList<>();
            subFields.add(DataTypes.createStructField("X", DataTypes.DoubleType, true));
            subFields.add(DataTypes.createStructField("Y", DataTypes.DoubleType, true));
            subFields.add(DataTypes.createStructField("Z", DataTypes.DoubleType, true));

            List<StructField> fields = new ArrayList<>();
            fields.add(DataTypes.createStructField("Serial", DataTypes.StringType, true));
            fields.add(DataTypes.createStructField("Zone", DataTypes.StringType, true));
            fields.add(DataTypes.createStructField("Group", DataTypes.StringType, true));
            fields.add(DataTypes.createStructField("coord", DataTypes.createStructType(subFields), true));
            fields.add(DataTypes.createStructField("Time", DataTypes.TimestampType, true));

            StructType schema = DataTypes.createStructType(fields);

            // Reuse one lazily created SparkSession per JVM, configured from the streaming context's SparkConf
            SparkSession spark = JavaSparkSessionSingleton.getInstance(rdd.context().getConf());

            // Turn each Tuple7 into a Row matching the schema: X, Y, Z are parsed as doubles and
            // the last field as an ISO-8601 instant converted to a java.sql.Timestamp
            JavaRDD<Row> rowRDD = rdd
                    .map(palabra -> RowFactory.create(palabra._1(), palabra._2(), palabra._3(),
                            RowFactory.create(Double.parseDouble(palabra._4()), Double.parseDouble(palabra._5()),
                                    Double.parseDouble(palabra._6())),
                            Timestamp.from(Instant.parse(palabra._7()))));

            // Build a DataFrame from the rows and append it to the "pruebaF" collection through the MongoDB Spark Connector
            Dataset<Row> wordsDataFrame = spark.createDataFrame(rowRDD, schema);
            wordsDataFrame.show();
            MongoSpark.write(wordsDataFrame).option("collection", "pruebaF").mode("append").save();
        });

        // Start the streaming computation and block until it terminates
        ssc.start();
        ssc.awaitTermination();
    }

    /** Drops the database referenced by the connection string so every run starts from an empty state. */
    private static void dropDatabase(final String connectionString) {
        MongoClientURI uri = new MongoClientURI(connectionString);
        new MongoClient(uri).dropDatabase(uri.getDatabase());
    }
}

/** Lazily instantiated singleton instance of SparkSession */
class JavaSparkSessionSingleton {
    private static transient SparkSession instance = null;

    public static SparkSession getInstance(SparkConf sparkConf) {
        if (instance == null) {
            instance = SparkSession.builder().config(sparkConf).getOrCreate();
        }
        return instance;
    }
}
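
Example

The streaming job above expects each Kafka message value to be a single line with seven space-separated fields: Serial, Zone, Group, X, Y, Z and an ISO-8601 timestamp; fields four to six become the nested coord sub-document and the last field the Time column. Below is a minimal sketch of a standalone producer that publishes one such record with the standard Kafka producer client. The broker address localhost:9092, the topic name sensorTopic and the class name SampleRecordProducer are illustrative assumptions, not part of the original example.

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

public final class SampleRecordProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Assumed broker address; pass the same value as <brokers> to KafkaSparkMongo
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // Seven space-separated fields: Serial Zone Group X Y Z Time.
        // The last field must parse with java.time.Instant.parse, e.g. an ISO-8601 instant.
        String record = "S-001 zoneA group1 1.0 2.5 3.75 2016-05-04T10:15:30Z";

        Producer<String, String> producer = new KafkaProducer<>(props);
        // "sensorTopic" is an assumed topic name; pass the same value as <topics> to KafkaSparkMongo
        producer.send(new ProducerRecord<>("sensorTopic", record));
        producer.close();
    }
}

With such records on the topic, the streaming job could then be submitted along the lines of
   $ spark-submit --class KafkaSparkMongo PATH/KafkaSparkMongo.jar localhost:9092 sensorTopic
which prints each parsed batch and appends it to the pruebaF collection of the streamSparkFinal database.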