
Spark Streaming consuming data from Kafka: two ways to store offsets, with ZooKeeper and with MySQL

Spark can read data from Kafka in two ways: the receiver approach and the direct approach. Both offset-storage techniques shared here build on the direct approach; a small sketch contrasting the two APIs follows.
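For context, here is a minimal side-by-side sketch of the two APIs (the topic "someTopic", group "someGroup" and the single-host addresses are placeholders, not values from this article). With the receiver approach the high-level consumer commits offsets to ZooKeeper by itself; createDirectStream connects straight to the partitions and leaves offset management entirely to the application, which is why the rest of this post stores offsets by hand.

import kafka.serializer.StringDecoder
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

object ReceiverVsDirect {
  def main(args: Array[String]): Unit = {
    val ssc = new StreamingContext(new SparkConf().setAppName("demo").setMaster("local[2]"), Seconds(5))

    // Receiver approach: the high-level consumer tracks and commits offsets to ZooKeeper for you.
    val receiverStream = KafkaUtils.createStream(ssc, "hadoop01:2181", "someGroup", Map("someTopic" -> 1))

    // Direct approach: no receiver, one RDD partition per Kafka partition,
    // offsets are yours to track and commit.
    val directStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
      ssc, Map("metadata.broker.list" -> "hadoop01:9092"), Set("someTopic"))

    receiverStream.map(_._2).print()
    directStream.map(_._2).print()

    ssc.start()
    ssc.awaitTermination()
  }
}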
Without further ado, straight to the code!

The first approach stores the offsets in ZooKeeper.

import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import kafka.utils.{ZKGroupTopicDirs, ZkUtils}
import org.I0Itec.zkclient.ZkClient
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.{Duration, StreamingContext}

object KafkaDirectZookeeper {

  def main(args: Array[String]): Unit = {
 
    val group = "DirectAndZk"
    val conf = new SparkConf().setAppName("KafkaDirectWordCount").setMaster("local[2]")
    val ssc = new StreamingContext(conf, Duration(5000))
    val topic = "ditopic"
    //指定kafka的broker地址(sparkStream的Task直連到kafka的分割槽上,用更加底層的API消費,效率更高)
    val brokerList = "hadoop01:9092,hadoop02:9092,hadoop03:9092"
    //指定zk的地址,後期更新消費的偏移量時使用(以後可以使用Redis、MySQL來記錄偏移量)
    val zkQuorum = "hadoop01:2181,hadoop02:2181,hadoop03:2181"
    //建立 stream 時使用的 topic 名字集合,SparkStreaming可同時消費多個topic
    val topics: Set[String] = Set(topic)
    //建立一個 ZKGroupTopicDirs 物件,其實是指定往zk中寫入資料的目錄,用於儲存偏移量
    val topicDirs = new ZKGroupTopicDirs(group, topic)
   // new ZKGroupTopicDirs()
    //獲取 zookeeper 中的路徑 "/g001/offsets/wordcount/"
    val zkTopicPath = s"${topicDirs.consumerOffsetDir}"

    //Kafka parameters
    val kafkaParams = Map(
      "metadata.broker.list" -> brokerList,
      "group.id" -> group,
      //start reading from the earliest available offset when no offset is stored
      "auto.offset.reset" -> kafka.api.OffsetRequest.SmallestTimeString
    )

    //ZooKeeper client built from the quorum above;
    //it reads the stored offsets and writes the updated ones back
    val zkClient = new ZkClient(zkQuorum)

    //check whether the path already has child nodes (one child node per partition is created when we save offsets ourselves), e.g.
    // /g001/offsets/wordcount/0/10001
    // /g001/offsets/wordcount/1/30001
    // /g001/offsets/wordcount/2/10001
    //zkTopicPath -> /g001/offsets/wordcount/

    val children = zkClient.countChildren(zkTopicPath)

    var kafkaStream: InputDStream[(String, String)] = null

    //if ZooKeeper already holds offsets, they are used as the starting position of the kafkaStream
    var fromOffsets: Map[TopicAndPartition, Long] = Map()

    //offsets have been saved before
    if (children > 0) {
      for (i <- 0 until children) {
        // /g001/offsets/wordcount/0/10001
        // /g001/offsets/wordcount/0
        val partitionOffset = zkClient.readData[String](s"$zkTopicPath/${i}")
        // wordcount/0
        val tp = TopicAndPartition(topic, i)
        //add each partition's offset to fromOffsets
        // wordcount/0 -> 10001
        fromOffsets += (tp -> partitionOffset.toLong)
      }
      //Key: the Kafka message key   Value: e.g. "hello tom hello jerry"
      //this transforms every Kafka message into a (key, message) tuple
      val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key(), mmd.message())

      //create the direct DStream through KafkaUtils (the fromOffsets argument makes consumption continue from the offsets computed above)
      //[String, String, StringDecoder, StringDecoder, (String, String)]
      //  key    value    key decoder    value decoder
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](ssc, kafkaParams, fromOffsets, messageHandler)
    } else {
      //no offsets saved yet: start from the latest (largest) or earliest (smallest) offset according to kafkaParams
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
    }

    //offset ranges of the current batch
    var offsetRanges = Array[OffsetRange]()

    //read the offset ranges inside foreachRDD before applying any transformation:
    //only the original KafkaRDD can be cast to HasOffsetRanges and expose its offsets
    kafkaStream.foreachRDD { kafkaRDD =>
      offsetRanges = kafkaRDD.asInstanceOf[HasOffsetRanges].offsetRanges
      //keep only the message values
      val lines: RDD[String] = kafkaRDD.map(_._2)
      //operate on the RDD and trigger an action
      lines.foreachPartition(partition =>
        partition.foreach(x => {
          println(x)
        })
      )
      for (o <- offsetRanges) {
        //  /g001/offsets/wordcount/0
        val zkPath = s"${topicDirs.consumerOffsetDir}/${o.partition}"
        //save this partition's offset to ZooKeeper
        //  /g001/offsets/wordcount/0/20000
        ZkUtils.updatePersistentPath(zkClient, zkPath, o.untilOffset.toString)
      }
    }
    ssc.start()
    ssc.awaitTermination()
  }
}
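To check that the offsets really are being committed, you can read the znodes back with the same kind of ZkClient. This is only a minimal verification sketch, reusing the group, topic and quorum assumed by the job above:

import kafka.utils.ZKGroupTopicDirs
import org.I0Itec.zkclient.ZkClient

object ReadZkOffsets {
  def main(args: Array[String]): Unit = {
    val zkClient = new ZkClient("hadoop01:2181,hadoop02:2181,hadoop03:2181")
    val dirs = new ZKGroupTopicDirs("DirectAndZk", "ditopic")
    val partitions = zkClient.countChildren(dirs.consumerOffsetDir)
    for (i <- 0 until partitions) {
      // each child node holds the last committed untilOffset for that partition
      val offset = zkClient.readData[String](s"${dirs.consumerOffsetDir}/$i")
      println(s"partition $i -> offset $offset")
    }
    zkClient.close()
  }
}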

The second approach stores the offsets in MySQL.
Note: this version uses scalikejdbc.
Add the following dependencies:

        <dependency>
            <groupId>org.scalikejdbc</groupId>
            <artifactId>scalikejdbc_2.11</artifactId>
            <version>2.5.0</version>
        </dependency>
        <dependency>
            <groupId>org.scalikejdbc</groupId>
            <artifactId>scalikejdbc-core_2.11</artifactId>
            <version>2.5.0</version>
        </dependency>
        <dependency>
            <groupId>org.scalikejdbc</groupId>
            <artifactId>scalikejdbc-config_2.11</artifactId>
            <version>2.5.0</version>
        </dependency>

Configure the database connection below (scalikejdbc-config's DBs.setup() reads these db.default.* keys from application.conf on the classpath):

db.default.driver="com.mysql.jdbc.Driver"
db.default.url="jdbc:mysql://localhost:3306/test?characterEncoding=utf-8"
db.default.user="root"
db.default.password="root"
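
The code below reads from and upserts into an offset table that the article never defines. A possible schema (an assumption, not shown in the original) needs a composite primary key so that the replace into used later acts as an upsert per group/topic/partition; a one-off setup sketch with scalikejdbc could look like this:

import scalikejdbc._
import scalikejdbc.config.DBs

//hypothetical helper, not part of the original article: creates the assumed offset table once
object CreateOffsetTable {
  def main(args: Array[String]): Unit = {
    DBs.setup() //reads db.default.* from application.conf
    DB.autoCommit { implicit session =>
      SQL("""
        create table if not exists offset (
          groupId     varchar(100) not null,
          topic       varchar(100) not null,
          partitions  int          not null,
          untilOffset bigint       not null,
          primary key (groupId, topic, partitions)
        )
      """).execute().apply()
    }
    DBs.close()
  }
}

With the table in place, the full consumer looks like this:
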
import com.alibaba.fastjson.{JSON, JSONObject}
import kafka.common.TopicAndPartition
import kafka.message.{Message, MessageAndMetadata}
import kafka.serializer.StringDecoder
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.KafkaCluster.Err
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaCluster, KafkaUtils}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import scalikejdbc.{DB, SQL}
import scalikejdbc.config.DBs

object SparkStreamingOffsetMysql {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("ssom").setMaster("local[2]")
    val ssc = new StreamingContext(conf, Seconds(3))
    val groupId = "didi"
    val brokerList = "hadoop01:9092,hadoop02:9092,hadoop03:9092"
    val topic = "ditopic"
    val topics = Set(topic)
    val kafkas = Map(
      "metadata.broker.list" -> brokerList,
      "group.id" -> groupId,
      "auto.offset.reset" -> kafka.api.OffsetRequest.SmallestTimeString)
    DBs.setup()
    //read the offsets for this consumer group straight from MySQL
    val fromOffset: Map[TopicAndPartition, Long] =
      DB.readOnly {
        implicit session => {
          SQL(s"select * from offset where groupId = '${groupId}'")
            //map every row to a (TopicAndPartition, offset) tuple
            .map(m => (TopicAndPartition(
            m.string("topic"), m.int("partitions")), m.long("untilOffset")))
            .toList().apply()
        }.toMap //turn the list of tuples into the Map declared above
      }
    //create an InputDStream and read data according to the stored offsets
    var kafkaStream: InputDStream[(String, String)] = null
    //decide based on what was found in MySQL
    if (fromOffset.isEmpty) {
      //first start of the program: no stored offsets yet
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
        ssc, kafkas, topics)
    } else {
      //not the first start: validate the stored offsets against Kafka
      var checkedOffset = Map[TopicAndPartition, Long]()
      val kafkaCluster = new KafkaCluster(kafkas)
      val earliestOffsets: Either[Err, Map[TopicAndPartition, KafkaCluster.LeaderOffset]] =
        kafkaCluster.getEarliestLeaderOffsets(fromOffset.keySet)
      //compare the offsets stored in MySQL with the earliest offsets Kafka still holds
      //(log retention may have deleted data, leaving the stored offset too small)
      if (earliestOffsets.isRight) {
        val topicAndPartitionOffset: Map[TopicAndPartition, KafkaCluster.LeaderOffset] = earliestOffsets.right.get
        //keep the larger of the two for every partition
        checkedOffset = fromOffset.map(owner => {
          //the earliest offset Kafka still has for this partition
          val topicOffset = topicAndPartitionOffset(owner._1).offset
          if (owner._2 > topicOffset) {
            owner
          } else {
            (owner._1, topicOffset)
          }
        })
      }
      val messageHandler = (mmd: MessageAndMetadata[String, String]) => {
        (mmd.key(), mmd.message())
      }
      kafkaStream = KafkaUtils.createDirectStream[String, String,
        StringDecoder, StringDecoder, (String, String)](
        ssc, kafkas, checkedOffset, messageHandler)
    }
    kafkaStream.foreachRDD(kafkaRDD => {
      //grab the offset ranges before any transformation on the RDD
      val offsetRanges = kafkaRDD.asInstanceOf[HasOffsetRanges].offsetRanges
      //process the message values and trigger an action
      kafkaRDD.map(_._2).foreachPartition(partition =>
        partition.foreach(x => {
          println(x)
        })
      )

      DB.localTx {
        implicit session =>
          for (os <- offsetRanges) {
            /*  SQL("update offset set groupId=?,topic=?,partitions=?,untilOffset=?")
             .bind(groupId,os.topic,os.partition,os.untilOffset).update().apply()*/
            SQL("replace into offset(groupId,topic,partitions,untilOffset) values(?,?,?,?)")
              .bind(groupId, os.topic, os.partition, os.untilOffset).update().apply()
          }
      }
    })
    ssc.start()
    ssc.awaitTermination()
  }
}