易涛, 3 years ago
parent commit 5950b03820

+ 23 - 51
src/main/java/com/persagy/kafka/KafkaCommonConfig.java

@@ -9,6 +9,8 @@ import org.apache.kafka.common.serialization.StringSerializer;
 import org.springframework.beans.factory.InitializingBean;
 import org.springframework.beans.factory.annotation.Value;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
+import org.springframework.boot.context.properties.EnableConfigurationProperties;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
 import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
@@ -31,59 +33,23 @@ import java.util.Set;
  **/
 @Slf4j
 @Configuration
+@EnableConfigurationProperties({KafkaProperties.class})
 @ConditionalOnProperty(prefix = "spring.kafka",name = "enable",havingValue = "true")
 public class KafkaCommonConfig implements InitializingBean {
 
-    @Value("${spring.kafka.bootstrapServers}")
-    private String bootstrapServers;
-
-    @Value("${spring.kafka.consumer.auto-offset-reset}")
-    private String autoOffsetReset;
-
-    @Value("${spring.kafka.consumer.enable-auto-commit}")
-    private String enableAutoCommit;
-
-    @Value("${spring.kafka.consumer.group-id}")
-    private String groupId;
-
-
-    @Value("${spring.kafka.producer.acks}")
-    private String acks;
-
-    @Value("${spring.kafka.producer.batch-size}")
-    private String batchSize;
-
-    @Value("${spring.kafka.producer.linger-ms}")
-    private String lingerMs;
-
     @Value("${spring.kafka.consumer.topics}")
-    private String topics;
+    private String edgeTopic;
 
+    private final KafkaProperties properties;
 
-    // Build the producer config map; ProducerConfig exposes more configurable properties than Spring Boot auto-configuration does
-    private Map<String, Object> producerProperties(){
-        Map<String, Object> props = new HashMap<>();
-        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
-        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
-        props.put(ProducerConfig.ACKS_CONFIG, acks);
-        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
-        props.put(ProducerConfig.LINGER_MS_CONFIG, lingerMs);
-        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-
-        return props;
+    public KafkaCommonConfig(KafkaProperties properties) {
+        this.properties = properties;
     }
 
-
     // Build the consumer properties map; ConsumerConfig exposes more configurable properties than Spring Boot auto-configuration does
     private Map<String, Object> consumerProperties(){
-        Map<String, Object> props = new HashMap<>();
-        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
-        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-        props.put(ConsumerConfig.GROUP_ID_CONFIG,groupId);
-        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,autoOffsetReset);
-        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-        return props;
+//        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+        return properties.buildConsumerProperties();
     }
 
     /**
@@ -95,21 +61,27 @@ public class KafkaCommonConfig implements InitializingBean {
         return new DefaultKafkaConsumerFactory(consumerProperties());
     }
 
-
     @Bean("listenerContainerFactory")
     // Customize the consumer listener container factory
     public ConcurrentKafkaListenerContainerFactory listenerContainerFactory(DefaultKafkaConsumerFactory consumerFactory) {
         // Use the DefaultKafkaConsumerFactory defined above
         ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory();
         factory.setConsumerFactory(consumerFactory);
-        // Set the consumer ack mode to manual; adjust to your needs
-        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
+
+        // Set the consumer ack mode to manual with immediate offset commits; adjust to your needs
+        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
         // Enable batch consumption via a batch listener; concurrency is 1, adjust to your needs
         factory.setConcurrency(1);
         factory.setBatchListener(true);
         return factory;
     }
 
+    // Build the producer config map; ProducerConfig exposes more configurable properties than Spring Boot auto-configuration does
+    private Map<String, Object> producerProperties(){
+
+        return properties.buildProducerProperties();
+    }
+
     /**
     * Redefine the DefaultKafkaProducerFactory instead of using the one created by Spring Boot's KafkaAutoConfiguration
      * @return
@@ -119,6 +91,7 @@ public class KafkaCommonConfig implements InitializingBean {
         return new DefaultKafkaProducerFactory(producerProperties());
     }
 
+
     /**
     * Redefine the KafkaTemplate instead of using the one created by Spring Boot's KafkaAutoConfiguration
      * @param produceFactory
@@ -129,17 +102,16 @@ public class KafkaCommonConfig implements InitializingBean {
         return new KafkaTemplate(produceFactory);
     }
 
-
     @Override
-    public void afterPropertiesSet() {
+    public void afterPropertiesSet() throws Exception {
         String topicName = wireTopics();
         System.setProperty("topicName", topicName);
-        log.info("### set system config topic:{}"+ topicName);
+        log.info("### set system config topic:{}", topicName);
     }
 
     private String wireTopics(){
         Set<String> topicSet = new HashSet<>();
-            topicSet.add(topics);
+        topicSet.add(edgeTopic);
         return StringUtils.join(topicSet,",");
     }
-}
+}
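
For orientation, here is a minimal sketch (not part of this commit) of a listener that would plug into this configuration: the container factory is registered as listenerContainerFactory with a batch listener and MANUAL_IMMEDIATE ack mode, and afterPropertiesSet exposes the consumer topics through the topicName system property. The class name, payload type, and the way topicName is resolved below are assumptions for illustration only.

import java.util.List;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Slf4j
@Component
public class EdgeAlarmListener {

    // "topicName" is the system property set in KafkaCommonConfig#afterPropertiesSet;
    // it may contain several comma-separated topics, hence the split
    @KafkaListener(topics = "#{'${topicName}'.split(',')}",
                   containerFactory = "listenerContainerFactory")
    public void onMessages(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        // setBatchListener(true) hands over the whole polled batch in one call
        records.forEach(record -> log.info("received: {}", record.value()));
        // MANUAL_IMMEDIATE commits the offsets for this batch as soon as acknowledge() is called
        ack.acknowledge();
    }
}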

+ 16 - 4
src/main/resources/application.yml

@@ -45,7 +45,14 @@ spring:
       ddl-auto: none
   kafka:
     enable: false
-    bootstrapServers: 192.168.64.16:9092
+    bootstrapServers: 113.108.52.72:4439,113.108.52.72:4440
+    ssl:
+      #protocol: SSL
+      key-password: cmsk-ioc-1234 #kafkadockerclient
+      key-store-location: file://D:\client.keystore.jks # file://e:\certs\docker.kafka.client.keystore.jks
+      key-store-password: cmsk-ioc-1234 #kafkadockerclient
+      trust-store-location: file://D:\client.truststore.jks #file://e:\certs\docker.kafka.client.truststore.jks
+      trust-store-password: cmsk-ioc-1234 #kafkadockerclient
     producer:
       # Number of times a message is retried after a send error.
       retries: 2
@@ -56,11 +63,13 @@ spring:
       # acks=0 : the producer does not wait for any response from the server before considering the message written.
       # acks=1 : the producer receives a success response from the server as soon as the cluster leader node has received the message.
       # acks=all : the producer receives a success response from the server only after every replicating node has received the message.
-      acks: "1"
+      acks: 1
       properties:
         max.request.size: 50000000
+        security.protocol: SSL
+        ssl.endpoint.identification.algorithm:
       linger-ms: 500
-      topics: topic.alarm.edge
+      topics: persage.alarm.edge2alarm
     consumer:
       # Auto-commit interval. In Spring Boot 2.x this value is a Duration and must follow a specific format, such as 1S, 1M, 2H, 5D
       auto-commit-interval: 1S
@@ -71,7 +80,10 @@ spring:
       # Whether to auto-commit offsets. The default is true; to avoid duplicate and lost data, set it to false and commit offsets manually
       enable-auto-commit: false
       group-id: ibms-project-alarm
-      topics: topic.alarm.cloud
+      topics: persage.alarm.alarm2edge
+      properties:
+        security.protocol: SSL
+        ssl.endpoint.identification.algorithm:
     listener:
       # Number of threads running in the listener container.
       concurrency: 5
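
As a complementary sketch (also not part of this commit), sending to the producer topic from this configuration could look like the following; the class name, the producerTopic field, and reading spring.kafka.producer.topics via @Value are assumptions, while the KafkaTemplate bean itself comes from KafkaCommonConfig and already carries the SSL settings built from KafkaProperties.

import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

@Component
public class EdgeAlarmSender {

    // spring.kafka.producer.topics from application.yml (persage.alarm.edge2alarm above)
    @Value("${spring.kafka.producer.topics}")
    private String producerTopic;

    private final KafkaTemplate<String, String> kafkaTemplate;

    public EdgeAlarmSender(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    public void send(String payload) {
        // security.protocol: SSL and the keystore/truststore settings are applied when the
        // producer factory is built from KafkaProperties, so no extra code is needed here
        kafkaTemplate.send(producerTopic, payload);
    }
}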