Integrating Spark Streaming with Kafka: the direct approach
By 阿新 • Published 2018-10-31
```scala
import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import kafka.utils.{ZKGroupTopicDirs, ZkUtils}
import org.I0Itec.zkclient.ZkClient
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.{Duration, StreamingContext}

/**
  * Created by zx on 2017/7/31.
  */
object KafkaDirectWordCountV2 {

  def main(args: Array[String]): Unit = {
    // Consumer group name
    val group = "g001"
    // Create the SparkConf
    val conf = new SparkConf().setAppName("KafkaDirectWordCount").setMaster("local[2]")
    // Create the StreamingContext with a 5-second batch interval
    val ssc = new StreamingContext(conf, Duration(5000))
    // Topic to consume
    val topic = "wwcc"
    // Kafka broker list (with the direct approach, each Spark Streaming task
    // connects straight to a Kafka partition via the lower-level API, which is
    // more efficient)
    val brokerList = "node-4:9092,node-5:9092,node-6:9092"
    // ZooKeeper quorum, used later to persist the consumed offsets
    // (Redis or MySQL could be used for this instead)
    val zkQuorum = "node-1:2181,node-2:2181,node-3:2181"
    // Topic set used when creating the stream; Spark Streaming can consume
    // several topics at once
    val topics: Set[String] = Set(topic)

    // ZKGroupTopicDirs points at the ZooKeeper directory where the offsets
    // for this group/topic are stored
    val topicDirs = new ZKGroupTopicDirs(group, topic)
    // The ZooKeeper path, e.g. "/g001/offsets/wwcc"
    val zkTopicPath = s"${topicDirs.consumerOffsetDir}"

    // Kafka parameters
    val kafkaParams = Map(
      "metadata.broker.list" -> brokerList,
      "group.id" -> group,
      // Read from the beginning when no offset has been saved
      "auto.offset.reset" -> kafka.api.OffsetRequest.SmallestTimeString
    )

    // ZooKeeper client, used to read saved offsets from ZooKeeper and to
    // update them after each batch
    val zkClient = new ZkClient(zkQuorum)

    // Check whether the path has child nodes (they exist if a previous run
    // saved per-partition offsets), e.g.:
    //   /g001/offsets/wwcc/0/10001
    //   /g001/offsets/wwcc/1/30001
    //   /g001/offsets/wwcc/2/10001
    // zkTopicPath -> /g001/offsets/wwcc
    val children = zkClient.countChildren(zkTopicPath)

    var kafkaStream: InputDStream[(String, String)] = null

    // If ZooKeeper holds saved offsets, use them as the stream's starting point
    var fromOffsets: Map[TopicAndPartition, Long] = Map()

    // Offsets were saved on a previous run
    if (children > 0) {
      for (i <- 0 until children) {
        // e.g. read "10001" from /g001/offsets/wwcc/0
        val partitionOffset = zkClient.readData[String](s"$zkTopicPath/$i")
        val tp = TopicAndPartition(topic, i)
        // Record each partition's offset in fromOffsets, e.g. wwcc/0 -> 10001
        fromOffsets += (tp -> partitionOffset.toLong)
      }

      // Key: the Kafka key; value: e.g. "hello tom hello jerry"
      // Transform every Kafka message into a (kafka key, message) tuple
      val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key(), mmd.message())

      // Create the direct DStream via KafkaUtils; the fromOffsets parameter
      // makes consumption resume from the offsets computed above.
      // Type parameters: [key, value, key decoder, value decoder, result type]
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](
        ssc, kafkaParams, fromOffsets, messageHandler)
    } else {
      // No saved offsets: start from the newest (largest) or oldest (smallest)
      // offset, as configured in kafkaParams
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
        ssc, kafkaParams, topics)
    }

    // The consumed offset ranges
    var offsetRanges = Array[OffsetRange]()

    // With the direct approach, offsets are only available on the KafkaRDDs
    // inside the KafkaDStream, so we cannot apply DStream transformations
    // first. Instead we call foreachRDD on kafkaStream, grab each RDD's
    // offsets, and then operate on the RDD itself.
    // Iterate over the KafkaRDDs in the KafkaDStream, one per batch:
    kafkaStream.foreachRDD { kafkaRDD =>
      // Only a KafkaRDD can be cast to HasOffsetRanges to expose its offsets
      offsetRanges = kafkaRDD.asInstanceOf[HasOffsetRanges].offsetRanges
      val lines: RDD[String] = kafkaRDD.map(_._2)

      // Operate on the RDD, triggering an action
      lines.foreachPartition(partition =>
        partition.foreach(x => println(x))
      )

      for (o <- offsetRanges) {
        // e.g. /g001/offsets/wwcc/0
        val zkPath = s"${topicDirs.consumerOffsetDir}/${o.partition}"
        // Save this partition's offset to ZooKeeper,
        // e.g. /g001/offsets/wwcc/0 -> 20000
        ZkUtils.updatePersistentPath(zkClient, zkPath, o.untilOffset.toString)
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
```
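The comment in the code notes that Redis or MySQL could replace ZooKeeper as the offset store. Below is a minimal sketch of the Redis variant, assuming the Jedis client; the hash layout (`offsets:<group>:<topic>` -> partition -> offset) and the helper names are my own illustration, not a fixed convention.

```scala
import kafka.common.TopicAndPartition
import org.apache.spark.streaming.kafka.OffsetRange
import redis.clients.jedis.Jedis

import scala.collection.JavaConverters._

// Hypothetical helpers for keeping offsets in a Redis hash instead of ZooKeeper.
object RedisOffsetStore {

  private def key(group: String, topic: String) = s"offsets:$group:$topic"

  // Read previously saved offsets; an empty map means "no history yet"
  // (the equivalent of children == 0 in the ZooKeeper version).
  def readOffsets(jedis: Jedis, group: String, topic: String): Map[TopicAndPartition, Long] =
    jedis.hgetAll(key(group, topic)).asScala.map {
      case (partition, offset) => TopicAndPartition(topic, partition.toInt) -> offset.toLong
    }.toMap

  // Persist the untilOffset of each consumed range after a successful batch.
  def saveOffsets(jedis: Jedis, group: String, ranges: Array[OffsetRange]): Unit =
    ranges.foreach { o =>
      jedis.hset(key(group, o.topic), o.partition.toString, o.untilOffset.toString)
    }
}
```

With helpers like these, the `children > 0` check becomes `readOffsets(...).nonEmpty`, and the `ZkUtils.updatePersistentPath` loop inside `foreachRDD` collapses into a single `saveOffsets` call.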
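The program above uses the old spark-streaming-kafka 0.8 integration. For comparison, here is a minimal sketch of the same direct stream on the newer spark-streaming-kafka-0-10 integration, where the consumed offsets are committed back to Kafka itself via `commitAsync` rather than written to ZooKeeper by hand. The broker, topic, and group names are reused from above; treat this as a sketch, not a drop-in replacement.

```scala
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges, KafkaUtils}
import org.apache.spark.streaming.{Duration, StreamingContext}

object KafkaDirectWordCount010 {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("KafkaDirectWordCount010").setMaster("local[2]")
    val ssc = new StreamingContext(conf, Duration(5000))

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "node-4:9092,node-5:9092,node-6:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "g001",
      "auto.offset.reset" -> "earliest",
      // We commit manually after each batch succeeds, so disable auto-commit
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    val stream = KafkaUtils.createDirectStream[String, String](
      ssc, PreferConsistent, Subscribe[String, String](Set("wwcc"), kafkaParams))

    stream.foreachRDD { rdd =>
      // As in the 0.8 version, offsets are only available on the KafkaRDD itself
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd.map(_.value()).foreachPartition(_.foreach(println))
      // Commit the consumed ranges back to Kafka (asynchronously)
      stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
```

The trade-off is the same either way: the offset write happens after the output action, so on failure a batch may be reprocessed (at-least-once semantics) regardless of whether offsets live in ZooKeeper, Redis, or Kafka.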