Kafka Study Notes: Simple Java Operations

Reposted from: http://www.cnblogs.com/edison2012/p/5759223.html

Maven dependencies (kafka-clients provides the new Java producer/consumer API; kafka_2.11 provides the Scala core classes, including kafka.javaapi.consumer.SimpleConsumer used further below):

    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>0.8.2.1</version>
    </dependency>
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka_2.11</artifactId>
        <version>0.8.2.1</version>
    </dependency>


The producer code is as follows:

    import java.util.Properties;

    import org.apache.kafka.clients.producer.Callback;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.clients.producer.RecordMetadata;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class KafkaProducerTest {

        private static final Logger LOG = LoggerFactory.getLogger(KafkaProducerTest.class);

        private static Properties properties = null;

        static {
            properties = new Properties();
            properties.put("bootstrap.servers", "centos.master:9092,centos.slave1:9092,centos.slave2:9092");
            // Settings carried over from the old Scala producer API; the new KafkaProducer
            // does not use them and only warns about unknown configs.
            properties.put("producer.type", "sync");
            properties.put("request.required.acks", "1");
            properties.put("serializer.class", "kafka.serializer.DefaultEncoder");
            properties.put("partitioner.class", "kafka.producer.DefaultPartitioner");
            properties.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
            // properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            properties.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
            // properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        }

        public void produce() {
            KafkaProducer<byte[], byte[]> kafkaProducer = new KafkaProducer<byte[], byte[]>(properties);
            ProducerRecord<byte[], byte[]> kafkaRecord = new ProducerRecord<byte[], byte[]>(
                    "test", "kkk".getBytes(), "vvv".getBytes());
            kafkaProducer.send(kafkaRecord, new Callback() {
                public void onCompletion(RecordMetadata metadata, Exception e) {
                    if (null != e) {
                        LOG.error(e.getMessage(), e);
                    } else {
                        LOG.info("the offset of the send record is {}", metadata.offset());
                    }
                    LOG.info("complete!");
                }
            });
            kafkaProducer.close();
        }

        public static void main(String[] args) {
            KafkaProducerTest kafkaProducerTest = new KafkaProducerTest();
            for (int i = 0; i < 10; i++) {
                kafkaProducerTest.produce();
            }
        }
    }
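
The send above is asynchronous with a callback. If a blocking send is preferred, the Future returned by send can be waited on; a minimal sketch, assuming the same properties and topic as above:

    // Synchronous-send sketch: block on the Future returned by send() until the broker acknowledges.
    KafkaProducer<byte[], byte[]> producer = new KafkaProducer<byte[], byte[]>(properties);
    try {
        RecordMetadata metadata = producer.send(
                new ProducerRecord<byte[], byte[]>("test", "kkk".getBytes(), "vvv".getBytes())).get();
        LOG.info("sent to partition {} at offset {}", metadata.partition(), metadata.offset());
    } catch (Exception e) {
        LOG.error(e.getMessage(), e);
    } finally {
        producer.close();
    }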

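The corresponding consumer, first attempted with the new consumer API from kafka-clients:
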
    import java.util.List;
    import java.util.Map;
    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class KafkaConsumerTest {

        private static final Logger LOG = LoggerFactory.getLogger(KafkaConsumerTest.class);

        public static void main(String[] args) {
            Properties properties = new Properties();
            properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
                    "centos.master:9092,centos.slave1:9092,centos.slave2:9092");
            properties.put(ConsumerConfig.GROUP_ID_CONFIG, "test-consumer-group");
            properties.put(ConsumerConfig.SESSION_TIMEOUT_MS, "1000");
            properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
            properties.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY, "range");
            // properties.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY, "roundrobin");
            properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "10000");
            properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                    "org.apache.kafka.common.serialization.ByteArrayDeserializer");
            properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                    "org.apache.kafka.common.serialization.ByteArrayDeserializer");

            KafkaConsumer<byte[], byte[]> kafkaConsumer = new KafkaConsumer<byte[], byte[]>(properties);
            kafkaConsumer.subscribe("test");
            // kafkaConsumer.subscribe("*");
            boolean isRunning = true;
            while (isRunning) {
                Map<String, ConsumerRecords<byte[], byte[]>> results = kafkaConsumer.poll(100);
                if (null != results) {
                    for (Map.Entry<String, ConsumerRecords<byte[], byte[]>> entry : results.entrySet()) {
                        LOG.info("topic {}", entry.getKey());
                        ConsumerRecords<byte[], byte[]> consumerRecords = entry.getValue();
                        List<ConsumerRecord<byte[], byte[]>> records = consumerRecords.records();
                        for (int i = 0, len = records.size(); i < len; i++) {
                            ConsumerRecord<byte[], byte[]> consumerRecord = records.get(i);
                            LOG.info("topic {} partition {}", consumerRecord.topic(), consumerRecord.partition());
                            try {
                                LOG.info("offset {} value {}", consumerRecord.offset(), new String(consumerRecord.value()));
                            } catch (Exception e) {
                                LOG.error(e.getMessage(), e);
                            }
                        }
                    }
                }
            }
            kafkaConsumer.close();
        }
    }

However, it turns out that in this version (0.8.2.1) KafkaConsumer's poll method is not implemented:

    @Override
    public Map<String, ConsumerRecords<K,V>> poll(long timeout) {
        // TODO Auto-generated method stub
        return null;
    }
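
For reference, the new consumer is fully implemented in later kafka-clients releases (0.9.0.0 and above), where poll() returns ConsumerRecords directly and subscribe() takes a collection of topics. A minimal sketch of the equivalent loop, assuming the Maven dependency is upgraded and the properties are adjusted accordingly:

    // Sketch only: requires kafka-clients 0.9.0.0 or newer.
    KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<byte[], byte[]>(properties);
    consumer.subscribe(java.util.Collections.singletonList("test"));  // subscribe takes a list of topics in 0.9+
    while (true) {
        ConsumerRecords<byte[], byte[]> records = consumer.poll(100); // returns ConsumerRecords, not a Map
        for (ConsumerRecord<byte[], byte[]> record : records) {
            LOG.info("topic {} partition {} offset {} value {}",
                    record.topic(), record.partition(), record.offset(), new String(record.value()));
        }
    }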


So the consumer was rewritten with kafka.javaapi.consumer.SimpleConsumer instead, which runs correctly:

    import java.nio.ByteBuffer;
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import kafka.api.FetchRequest;
    import kafka.api.FetchRequestBuilder;
    import kafka.api.PartitionOffsetRequestInfo;
    import kafka.cluster.Broker;
    import kafka.common.ErrorMapping;
    import kafka.common.TopicAndPartition;
    import kafka.javaapi.FetchResponse;
    import kafka.javaapi.OffsetRequest;
    import kafka.javaapi.OffsetResponse;
    import kafka.javaapi.PartitionMetadata;
    import kafka.javaapi.TopicMetadata;
    import kafka.javaapi.TopicMetadataRequest;
    import kafka.javaapi.TopicMetadataResponse;
    import kafka.javaapi.consumer.SimpleConsumer;
    import kafka.message.MessageAndOffset;

    public class KafkaSimpleConsumerTest {

        private List<String> brokerList = new ArrayList<String>();

        public KafkaSimpleConsumerTest() {
            brokerList = new ArrayList<String>();
        }

        public static void main(String args[]) {
            KafkaSimpleConsumerTest kafkaSimpleConsumer = new KafkaSimpleConsumerTest();
            // maximum number of messages to read
            long maxReadNum = Long.parseLong("3");
            // topic to subscribe to
            String topic = "test";
            // partition to read from
            int partition = Integer.parseInt("0");
            // broker seed nodes
            List<String> seeds = new ArrayList<String>();
            seeds.add("centos.master");
            seeds.add("centos.slave1");
            seeds.add("centos.slave2");
            // broker port
            int port = Integer.parseInt("9092");
            try {
                kafkaSimpleConsumer.run(maxReadNum, topic, partition, seeds, port);
            } catch (Exception e) {
                System.out.println("Oops:" + e);
                e.printStackTrace();
            }
        }

        public void run(long maxReadNum, String topic, int partition, List<String> seedBrokers, int port) throws Exception {
            // look up the metadata of the given topic and partition
            PartitionMetadata metadata = findLeader(seedBrokers, port, topic, partition);
            if (metadata == null) {
                System.out.println("can't find metadata for topic and partition. exit");
                return;
            }
            if (metadata.leader() == null) {
                System.out.println("can't find leader for topic and partition. exit");
                return;
            }
            String leadBroker = metadata.leader().host();
            String clientName = "client_" + topic + "_" + partition;
            SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
            long readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.EarliestTime(), clientName);
            int numErrors = 0;
            while (maxReadNum > 0) {
                if (consumer == null) {
                    consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
                }
                FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(topic, partition, readOffset, 100000).build();
                FetchResponse fetchResponse = consumer.fetch(req);
                if (fetchResponse.hasError()) {
                    numErrors++;
                    short code = fetchResponse.errorCode(topic, partition);
                    System.out.println("error fetching data from the broker:" + leadBroker + " reason: " + code);
                    if (numErrors > 5)
                        break;
                    if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                        readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
                        continue;
                    }
                    consumer.close();
                    consumer = null;
                    leadBroker = findNewLeader(leadBroker, topic, partition, port);
                    continue;
                }
                numErrors = 0;
                long numRead = 0;
                for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
                    long currentOffset = messageAndOffset.offset();
                    if (currentOffset < readOffset) {
                        System.out.println("found an old offset: " + currentOffset + " expecting: " + readOffset);
                        continue;
                    }
                    readOffset = messageAndOffset.nextOffset();
                    ByteBuffer payload = messageAndOffset.message().payload();
                    byte[] bytes = new byte[payload.limit()];
                    payload.get(bytes);
                    System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
                    numRead++;
                    maxReadNum--;
                }
                if (numRead == 0) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException ie) {
                    }
                }
            }
            if (consumer != null)
                consumer.close();
        }

        /**
         * Find the leader broker for the given topic and partition among the live brokers.
         * @param seedBrokers
         * @param port
         * @param topic
         * @param partition
         * @return
         */
        private PartitionMetadata findLeader(List<String> seedBrokers, int port, String topic, int partition) {
            PartitionMetadata partitionMetadata = null;
            loop: for (String seedBroker : seedBrokers) {
                SimpleConsumer consumer = null;
                try {
                    consumer = new SimpleConsumer(seedBroker, port, 100000, 64 * 1024, "leaderLookup");
                    List<String> topics = Collections.singletonList(topic);
                    TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(topics);
                    TopicMetadataResponse topicMetadataResponse = consumer.send(topicMetadataRequest);
                    List<TopicMetadata> topicMetadatas = topicMetadataResponse.topicsMetadata();
                    for (TopicMetadata topicMetadata : topicMetadatas) {
                        for (PartitionMetadata pMetadata : topicMetadata.partitionsMetadata()) {
                            if (pMetadata.partitionId() == partition) {
                                partitionMetadata = pMetadata;
                                break loop;
                            }
                        }
                    }
                } catch (Exception e) {
                    System.out.println("error communicating with broker [" + seedBroker + "] to find leader for [" + topic + ", " + partition + "] reason: " + e);
                } finally {
                    if (consumer != null)
                        consumer.close();
                }
            }
            if (partitionMetadata != null) {
                brokerList.clear();
                for (Broker replica : partitionMetadata.replicas()) {
                    brokerList.add(replica.host());
                }
            }
            return partitionMetadata;
        }

        public static long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName) {
            TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
            Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
            requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
            OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
            OffsetResponse response = consumer.getOffsetsBefore(request);
            if (response.hasError()) {
                System.out.println("error fetching offset data from the broker. reason: " + response.errorCode(topic, partition));
                return 0;
            }
            long[] offsets = response.offsets(topic, partition);
            return offsets[0];
        }

        private String findNewLeader(String oldLeader, String topic, int partition, int port) throws Exception {
            for (int i = 0; i < 3; i++) {
                boolean goToSleep = false;
                PartitionMetadata metadata = findLeader(brokerList, port, topic, partition);
                if (metadata == null) {
                    goToSleep = true;
                } else if (metadata.leader() == null) {
                    goToSleep = true;
                } else if (oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
                    goToSleep = true;
                } else {
                    return metadata.leader().host();
                }
                if (goToSleep) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException ie) {
                    }
                }
            }
            System.out.println("unable to find new leader after broker failure. exit");
            throw new Exception("unable to find new leader after broker failure. exit");
        }
    }
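
Note that SimpleConsumer leaves to the application everything the high-level consumer normally handles: leader discovery (findLeader/findNewLeader), offset tracking (getLastOffset plus nextOffset()), and failover when a broker goes down. With the topic test already populated by KafkaProducerTest, the program above reads at most three messages from partition 0, starting at the earliest available offset.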
