
Add Kafka as an alternative communication channel

易涛 3 years ago
Parent
Current commit
ee58432a59
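
This commit adds an optional Kafka transport alongside the existing Netty channel, toggled by `spring.kafka.enable`. For orientation, an illustrative `application.yml` fragment covering every property the new beans read via `@Value` (broker address, group id, topic names, and tuning values are assumptions, not part of the commit):

```yaml
spring:
  kafka:
    enable: true                       # toggle introduced by this commit (defaults to false)
    bootstrapServers: localhost:9092   # assumption: local broker
    consumer:
      auto-offset-reset: earliest
      enable-auto-commit: "false"      # ack mode is set to MANUAL in KafkaCommonConfig
      group-id: persagy-alarm-edge     # assumption: illustrative group id
      topics: topic_cloud_alarm        # assumption: illustrative inbound topic
    producer:
      acks: "1"
      batch-size: "16384"
      linger-ms: "5"
      topics: topic_edge_alarm         # assumption: illustrative outbound topic
```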

+ 4 - 0
pom.xml

@@ -160,6 +160,10 @@
             <artifactId>objenesis</artifactId>
             <version>${objenesis.version}</version>
         </dependency>
+        <dependency>
+            <groupId>org.springframework.kafka</groupId>
+            <artifactId>spring-kafka</artifactId>
+        </dependency>
     </dependencies>
 
     <build>
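
Note that the dependency is declared without a `<version>`, which assumes the Spring Boot parent or an imported BOM manages spring-kafka. Without such management, a version would have to be pinned explicitly, e.g. (version number is an assumption; match it to the Boot release in use):

```xml
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <!-- assumption: only needed when no Boot parent/BOM manages the version -->
    <version>2.7.14</version>
</dependency>
```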

+ 7 - 0
src/main/java/com/persagy/entity/NettyMessage.java

@@ -55,6 +55,13 @@ public class NettyMessage<T> extends Packet implements Serializable {
     private Boolean success;
 
 
+    public NettyMessage(int opCode, String channelId) {
+        this.channelId = channelId;
+        this.projectId = CommonConst.projectId;
+        this.groupCode = CommonConst.groupCode;
+        this.opCode = opCode;
+    }
+
     public NettyMessage(int opCode) {
         this.channelId = NettyClient.channel.id().toString();
         this.projectId = CommonConst.projectId;

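The new overload exists because the original constructor below it reads `NettyClient.channel.id()`, and that static channel is never initialized when the Netty client is disabled in Kafka mode. A minimal sketch of the two construction paths (opCode 4 is the "fetch all alarm definitions" request used elsewhere in this commit):

```java
import com.persagy.entity.NettyMessage;

// Hedged sketch: why the channelId overload matters once Kafka replaces Netty.
class MessageConstructionSketch {

    static NettyMessage forKafkaMode() {
        // Kafka mode: no Netty channel exists, so the channel id is passed explicitly (null here).
        return new NettyMessage(4, null);
    }

    static NettyMessage forNettyMode() {
        // Netty mode: the original constructor derives the id from NettyClient.channel,
        // which would fail if the client never connected.
        return new NettyMessage(4);
    }
}
```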
+ 6 - 2
src/main/java/com/persagy/init/InitRunner.java

@@ -29,14 +29,18 @@ public class InitRunner implements ServletContextAttributeListener, CommandLineR
     private WebSocketClientFactory webSocketClientFactory;
     @Autowired
     private NettyClient nettyClient;
+    @Value("${spring.kafka.enable:false}")
+    private Boolean kafkaEnable;
 
     @Override
     public void run(String... args) throws Exception {
 
         // Since 5.0, Aviator ships an LRU expression cache; you can cap the number of cached expressions, e.g. at most 10,000 results:
         AviatorEvaluator.getInstance().useLRUExpressionCache(10000);
-        // Start the Netty client to receive data from the cloud
-        nettyClient.connect();
+        if (!kafkaEnable) {
+            // Start the Netty client to receive data from the cloud
+            nettyClient.connect();
+        }
         // Start the WebSocket client to receive IoT collection data
         webSocketClientFactory.retryOutCallWebSocketClient();
         // Consume messages asynchronously

+ 0 - 1
src/main/java/com/persagy/job/AlarmMessageThread.java

@@ -23,7 +23,6 @@ public class AlarmMessageThread implements Runnable {
     @Override
     public void run() {
         try {
-            System.out.println("--init--");
             // Sleep 20 seconds after startup so the alarm Netty, WebSocket and Quartz components are all up
             Thread.sleep(20000);
             while (true) {

+ 14 - 3
src/main/java/com/persagy/job/SpringSchedule.java

@@ -8,6 +8,7 @@ import com.persagy.entity.NettyMessage;
 import com.persagy.netty.client.NettyClient;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
 import org.springframework.scheduling.annotation.Scheduled;
 import org.springframework.stereotype.Service;
 
@@ -27,10 +28,17 @@ import java.util.Objects;
 public class SpringSchedule {
     @Autowired
     NettyClient nettyClient;
+    @Value("${spring.kafka.enable:false}")
+    private Boolean kafkaEnable;
 
     @Scheduled(cron = "${alarm.get.all.alarmdefine.cron}")
     public void allResetCron() throws InterruptedException {
-        NettyMessage message = new NettyMessage(4);
+        NettyMessage message;
+        if (kafkaEnable) {
+            message = new NettyMessage(4, null);
+        } else {
+            message = new NettyMessage(4);
+        }
         JSONObject content = new JSONObject();
         content.put("groupCode", CommonConst.groupCode);
         content.put("projectId", CommonConst.projectId);
@@ -38,7 +46,7 @@ public class SpringSchedule {
         nettyClient.sendMessage(message);
     }
 
-    //@Scheduled(initialDelay = 2000, fixedDelay = 60000)
+    /*@Scheduled(initialDelay = 2000, fixedDelay = 60000)
     public void connectCron() {
         if (Objects.isNull(NettyClient.channel)) {
             log.warn("NettyClient is not init");
@@ -46,10 +54,13 @@ public class SpringSchedule {
         }
         NettyMessage message = new NettyMessage(3);
         nettyClient.sendMessageNotCheck(message);
-    }
+    }*/
 
     @Scheduled(initialDelay = 1000, fixedDelay = 600000)
     public void connectAnalizeCron() {
+        if (kafkaEnable) {
+            return;
+        }
         if (Objects.isNull(NettyClient.channel)) {
             log.warn("NettyClient is not init");
             return;

+ 145 - 0
src/main/java/com/persagy/kafka/KafkaCommonConfig.java

@@ -0,0 +1,145 @@
+package com.persagy.kafka;
+
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.springframework.beans.factory.InitializingBean;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
+import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
+import org.springframework.kafka.core.DefaultKafkaProducerFactory;
+import org.springframework.kafka.core.KafkaTemplate;
+import org.springframework.kafka.listener.ContainerProperties;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Kafka configuration. KafkaAutoConfiguration already builds a default configuration from the
+ * property file, but the auto-configured properties do not cover everything, so the relevant
+ * beans are defined by hand below.
+ *
+ * @author zhoujy
+ * @date 2018-12-17
+ **/
+@Slf4j
+@Configuration
+@ConditionalOnProperty(prefix = "spring.kafka", name = "enable", havingValue = "true")
+public class KafkaCommonConfig implements InitializingBean {
+
+    @Value("${spring.kafka.bootstrapServers}")
+    private String bootstrapServers;
+
+    @Value("${spring.kafka.consumer.auto-offset-reset}")
+    private String autoOffsetReset;
+
+    @Value("${spring.kafka.consumer.enable-auto-commit}")
+    private String enableAutoCommit;
+
+    @Value("${spring.kafka.consumer.group-id}")
+    private String groupId;
+
+
+    @Value("${spring.kafka.producer.acks}")
+    private String acks;
+
+    @Value("${spring.kafka.producer.batch-size}")
+    private String batchSize;
+
+    @Value("${spring.kafka.producer.linger-ms}")
+    private String lingerMs;
+
+    @Value("${spring.kafka.consumer.topics}")
+    private String topics;
+
+
+    // Build the producer config map; ProducerConfig exposes more tunables than Spring Boot auto-configuration covers
+    private Map<String, Object> producerProperties() {
+        Map<String, Object> props = new HashMap<>();
+        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+        props.put(ProducerConfig.ACKS_CONFIG, acks);
+        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
+        props.put(ProducerConfig.LINGER_MS_CONFIG, lingerMs);
+        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+
+        return props;
+    }
+
+
+    // Build the consumer config map; ConsumerConfig exposes more tunables than Spring Boot auto-configuration covers
+    private Map<String, Object> consumerProperties() {
+        Map<String, Object> props = new HashMap<>();
+        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
+        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
+        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
+        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+        return props;
+    }
+
+    /**
+     * Redefine the DefaultKafkaConsumerFactory instead of using the one Spring Boot creates by default
+     * @return consumer factory built from consumerProperties()
+     */
+    @Bean("consumerFactory")
+    public DefaultKafkaConsumerFactory consumerFactory() {
+        return new DefaultKafkaConsumerFactory(consumerProperties());
+    }
+
+
+    @Bean("listenerContainerFactory")
+    //个性化定义消费者
+    public ConcurrentKafkaListenerContainerFactory listenerContainerFactory(DefaultKafkaConsumerFactory consumerFactory) {
+        //指定使用DefaultKafkaConsumerFactory
+        ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory();
+        factory.setConsumerFactory(consumerFactory);
+        //设置消费者ack模式为手动,看需求设置
+        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
+        //设置可批量拉取消息消费,拉取数量一次3,看需求设置
+        factory.setConcurrency(1);
+        factory.setBatchListener(true);
+        return factory;
+    }
+
+    /**
+     * Redefine the DefaultKafkaProducerFactory instead of the one from Spring Boot's KafkaAutoConfiguration
+     * @return producer factory built from producerProperties()
+     */
+    @Bean("produceFactory")
+    public DefaultKafkaProducerFactory produceFactory() {
+        return new DefaultKafkaProducerFactory(producerProperties());
+    }
+
+    /**
+     * Redefine the KafkaTemplate instead of the one from Spring Boot's KafkaAutoConfiguration
+     * @param produceFactory the producer factory defined above
+     * @return template used by KafkaProducer
+     */
+    @Bean
+    public KafkaTemplate kafkaTemplate(DefaultKafkaProducerFactory produceFactory) {
+        return new KafkaTemplate(produceFactory);
+    }
+
+
+    @Override
+    public void afterPropertiesSet() {
+        String topicName = wireTopics();
+        System.setProperty("topicName", topicName);
+        log.info("### set system config topic: {}", topicName);
+    }
+
+    private String wireTopics() {
+        Set<String> topicSet = new HashSet<>();
+        topicSet.add(topics);
+        return StringUtils.join(topicSet, ",");
+    }
+}
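
The Javadoc's rationale, that `ProducerConfig`/`ConsumerConfig` expose more knobs than the `spring.kafka.*` keys auto-configuration binds, is easy to illustrate. A hedged sketch extending `consumerProperties()` with two such settings (both constants exist in `ConsumerConfig`; the values, and whether this Boot version covers them, are assumptions):

```java
import org.apache.kafka.clients.consumer.ConsumerConfig;

import java.util.HashMap;
import java.util.Map;

// Hedged sketch: extra ConsumerConfig tunables one could add to consumerProperties().
class ExtendedConsumerProps {

    static Map<String, Object> extend(Map<String, Object> base) {
        Map<String, Object> props = new HashMap<>(base);
        // Cap records returned per poll; pairs with the batch listener enabled above (value is an assumption).
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 3);
        // Bound how long the broker may hold a fetch open (value is an assumption).
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);
        return props;
    }
}
```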

+ 134 - 0
src/main/java/com/persagy/kafka/KafkaConsumer.java

@@ -0,0 +1,134 @@
+package com.persagy.kafka;
+
+import cn.hutool.core.collection.CollectionUtil;
+import cn.hutool.core.date.DateUtil;
+import cn.hutool.core.date.TimeInterval;
+import com.alibaba.fastjson.JSONObject;
+import com.alibaba.fastjson.TypeReference;
+import com.persagy.cache.AlarmInfoCache;
+import com.persagy.constant.CommonConst;
+import com.persagy.entity.AlarmDefine;
+import com.persagy.entity.NettyMessage;
+import com.persagy.entity.ZktAlarmRecordDO;
+import com.persagy.netty.client.NettyClient;
+import com.persagy.repository.AlarmRecordRepository;
+import com.persagy.service.AlarmDefineService;
+import com.persagy.utils.LockUtil;
+import io.netty.channel.ChannelHandlerContext;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.kafka.annotation.KafkaListener;
+import org.springframework.kafka.support.Acknowledgment;
+import org.springframework.kafka.support.KafkaHeaders;
+import org.springframework.messaging.handler.annotation.Header;
+import org.springframework.stereotype.Component;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+
+/**
+ * Kafka consumer
+ * @author 易涛
+ * @version 1.0
+ */
+@Configuration
+@Slf4j
+@ConditionalOnProperty(prefix = "spring.kafka", name = "enable", havingValue = "true")
+public class KafkaConsumer {
+
+    @Autowired
+    private AlarmDefineService alarmDefineService;
+
+    @Autowired
+    private AlarmRecordRepository alarmRecordRepository;
+
+    @KafkaListener(topics = {"#{'${topicName}'.split(',')}"}, containerFactory = "listenerContainerFactory")
+    public void topicCloudAlarmConsumer(List<ConsumerRecord<?, String>> records, Acknowledgment ack) {
+        for (ConsumerRecord<?, String> consumerRecord : records) {
+            Optional<String> message = Optional.ofNullable(consumerRecord.value());
+            if (message.isPresent()) {
+                NettyMessage msg = JSONObject.parseObject(message.get(), NettyMessage.class);
+                try {
+                    // Only handle messages addressed to this project, acknowledging as we go
+                    if (Objects.equals(msg.getProjectId(), CommonConst.projectId)) {
+                        handlerMsg(msg);
+                        ack.acknowledge();
+                    }
+                } catch (Exception e) {
+                    log.error("Failed to process Kafka message", e);
+                }
+            }
+        }
+    }
+
+    private void handlerMsg(NettyMessage msg) {
+        if (msg.getOpCode() == 7) {
+            log.info("--报警定义新增或更新--{}",msg);
+            NettyMessage<AlarmDefine> alarmDefineMessage = JSONObject.parseObject(msg.toString(), new TypeReference<NettyMessage<AlarmDefine>>() {
+            });
+            List<AlarmDefine> definesList = alarmDefineMessage.getContent();
+            if (CollectionUtil.isNotEmpty(definesList)) {
+                alarmDefineService.listSomeAlarmDefine(definesList);
+            }
+        } else if (msg.getOpCode() == 8) {
+            log.info("-----报警记录id推送----[{}]", msg);
+            List content = msg.getContent();
+            if (CollectionUtil.isNotEmpty(content)) {
+                JSONObject parseObject = JSONObject.parseObject(JSONObject.toJSONString(content.get(0)));
+                String defineId = AlarmInfoCache.getAlarmDefineId(parseObject);
+                ZktAlarmRecordDO zktAlarmRecordDO = alarmRecordRepository.findById(defineId).orElse(new ZktAlarmRecordDO());
+                zktAlarmRecordDO.setDefinitionId(defineId);
+                zktAlarmRecordDO.setObjId(parseObject.getString("objId"));
+                zktAlarmRecordDO.setItemId(parseObject.getString("itemId"));
+                zktAlarmRecordDO.setAlarmId(parseObject.getString("id"));
+                alarmRecordRepository.save(zktAlarmRecordDO);
+            }
+        } else if (msg.getOpCode() == 9) {
+            NettyMessage<AlarmDefine> alarmDefineMessage = JSONObject.parseObject(msg.toString(), new TypeReference<NettyMessage<AlarmDefine>>() {
+            });
+            List<AlarmDefine> definesList = alarmDefineMessage.getContent();
+            if (CollectionUtil.isNotEmpty(definesList)) {
+                try {
+                    LockUtil.getInstance().lock.lock();
+                    LockUtil.getInstance().setExecute(false);
+                    // Wait briefly so any in-flight evaluation can finish before swapping definitions
+                    Thread.sleep(4000);
+                    alarmDefineService.listAllAlarmDefine(definesList);
+                    LockUtil.getInstance().setExecute(true);
+                    LockUtil.getInstance().condition.signalAll();
+                } catch (InterruptedException e) {
+                    Thread.currentThread().interrupt();
+                    log.error("Interrupted while refreshing alarm definitions", e);
+                } finally {
+                    LockUtil.getInstance().lock.unlock();
+                }
+            }
+        } else if (msg.getOpCode() == 10) {
+            NettyMessage<AlarmDefine> alarmDefineMessage = JSONObject.parseObject(msg.toString(), new TypeReference<NettyMessage<AlarmDefine>>() {
+            });
+            List<AlarmDefine> definesList = alarmDefineMessage.getContent();
+            if (CollectionUtil.isNotEmpty(definesList)) {
+                alarmDefineService.deleteAlarmDefine(definesList);
+            }
+        } else if (msg.getOpCode() == 11) {
+            // Update the isolated system objects
+            NettyMessage<String> alarmDefineMessage = JSONObject.parseObject(msg.toString(), new TypeReference<NettyMessage<String>>() {
+            });
+            List<String> isolationSystemList = alarmDefineMessage.getContent();
+            if (CollectionUtil.isNotEmpty(isolationSystemList)) {
+                AlarmInfoCache.isolationSystemList = isolationSystemList;
+            }
+        } else if (msg.getOpCode() == 12) {
+            // The cloud pushed an alarm record state update
+            NettyMessage<JSONObject> alarmDefineMessage = JSONObject.parseObject(msg.toString(), new TypeReference<NettyMessage<JSONObject>>() {});
+            List<JSONObject> stateList = alarmDefineMessage.getContent();
+            alarmDefineService.updateAlarmDefine(stateList);
+        }
+    }
+
+}
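
The listener expects each record value to be a JSON-serialized `NettyMessage` whose `opCode` picks the handler branch (7/9/10: alarm definitions, 8: alarm record ids, 11: isolated systems, 12: record state). A minimal sketch of that envelope for opCode 8 (sample field values are assumptions; it relies on `NettyMessage` serializing to JSON, as the parsing code above implies):

```java
import com.alibaba.fastjson.JSONObject;
import com.persagy.entity.NettyMessage;

import java.util.Collections;

// Hedged sketch of the wire format the listener parses back with JSONObject.parseObject(...).
class EnvelopeSketch {

    static String alarmRecordPush() {
        NettyMessage<JSONObject> msg = new NettyMessage<>(8, null); // opCode 8: alarm record id push
        JSONObject alarm = new JSONObject();
        alarm.put("id", "alarm-001");     // assumption: illustrative ids
        alarm.put("objId", "obj-001");
        alarm.put("itemId", "item-001");
        msg.setContent(Collections.singletonList(alarm));
        return JSONObject.toJSONString(msg); // the string KafkaProducer.send() publishes
    }
}
```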

+ 76 - 0
src/main/java/com/persagy/kafka/KafkaProducer.java

@@ -0,0 +1,76 @@
+package com.persagy.kafka;
+
+import com.alibaba.fastjson.JSONObject;
+import com.persagy.constant.CommonConst;
+import com.persagy.entity.NettyMessage;
+import com.persagy.netty.client.NettyClient;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.core.annotation.Order;
+import org.springframework.kafka.core.KafkaTemplate;
+import org.springframework.kafka.support.SendResult;
+import org.springframework.stereotype.Component;
+import org.springframework.util.concurrent.ListenableFuture;
+import org.springframework.util.concurrent.ListenableFutureCallback;
+
+import javax.annotation.PostConstruct;
+import java.util.Arrays;
+
+/**
+ * Kafka producer
+ * @author 易涛
+ * @version 1.0
+ * @date 2021/8/26 09:33
+ */
+@Configuration
+@Slf4j
+@ConditionalOnProperty(prefix = "spring.kafka", name = "enable", havingValue = "true")
+public class KafkaProducer {
+
+    @Autowired
+    private KafkaTemplate<String, Object> kafkaTemplate;
+    @Autowired
+    private NettyClient nettyClient;
+
+    /**
+     * Topic that edge-side alarm messages are published to
+     */
+    @Value("${spring.kafka.producer.topics}")
+    private String topicEdgeAlarm;
+
+    // Runs once on startup, after the bean is constructed (a void @Bean factory method is invalid;
+    // the already-imported @PostConstruct is the intended hook)
+    @PostConstruct
+    public void queryDefine() throws InterruptedException {
+        // Announce the connection
+        nettyClient.sendMessage(new NettyMessage(200, null));
+        // On startup, request the full set of alarm definitions
+        NettyMessage nettyMessage = new NettyMessage(4, null);
+        JSONObject content = new JSONObject();
+        content.put("groupCode", CommonConst.groupCode);
+        content.put("projectId", CommonConst.projectId);
+        nettyMessage.setContent(Arrays.asList(content));
+        nettyClient.sendMessage(nettyMessage);
+    }
+
+
+    public void send(NettyMessage message) {
+        // Publish the message to the edge alarm topic
+        ListenableFuture<SendResult<String, Object>> future = kafkaTemplate.send(topicEdgeAlarm, JSONObject.toJSONString(message));
+        future.addCallback(new ListenableFutureCallback<SendResult<String, Object>>() {
+            @Override
+            public void onFailure(Throwable throwable) {
+                // Failure handling
+                log.error("{} - edge side failed to send message: {}", topicEdgeAlarm, throwable.getMessage());
+            }
+
+            @Override
+            public void onSuccess(SendResult<String, Object> sendResult) {
+                // Success handling
+                log.info("{} - edge side sent message: {}", topicEdgeAlarm, sendResult);
+            }
+        });
+    }
+}
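
For callers, the producer is a drop-in replacement for the Netty path; `NettyClient.sendMessage()` in the next file delegates to it whenever the toggle is on. A hedged usage sketch mirroring `queryDefine()` above:

```java
import com.alibaba.fastjson.JSONObject;
import com.persagy.constant.CommonConst;
import com.persagy.entity.NettyMessage;

import java.util.Arrays;

// Hedged usage sketch: publish a "fetch all alarm definitions" request (opCode 4) via the producer.
class ProducerUsageSketch {

    static void requestAllDefinitions(KafkaProducer kafkaProducer) {
        NettyMessage message = new NettyMessage(4, null);
        JSONObject content = new JSONObject();
        content.put("groupCode", CommonConst.groupCode);
        content.put("projectId", CommonConst.projectId);
        message.setContent(Arrays.asList(content));
        kafkaProducer.send(message); // serialized to JSON and sent to spring.kafka.producer.topics
    }
}
```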

+ 24 - 12
src/main/java/com/persagy/netty/client/NettyClient.java

@@ -5,6 +5,7 @@ import com.alibaba.fastjson.TypeReference;
 import com.persagy.constant.CommonConst;
 import com.persagy.entity.NettyMessage;
 import com.persagy.job.NettyMessageQueue;
+import com.persagy.kafka.KafkaProducer;
 import com.persagy.repository.AlarmRecordRepository;
 import com.persagy.service.AlarmDefineService;
 import io.netty.bootstrap.Bootstrap;
@@ -13,6 +14,7 @@ import io.netty.channel.nio.NioEventLoopGroup;
 import io.netty.channel.socket.nio.NioSocketChannel;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
 import org.springframework.stereotype.Component;
 
 @Slf4j
@@ -25,6 +27,11 @@ public class NettyClient {
     AlarmDefineService alarmDefineService;
     @Autowired
     AlarmRecordRepository alarmRecordRepository;
+    @Autowired(required = false)
+    private KafkaProducer kafkaProducer;
+
+    @Value("${spring.kafka.enable:false}")
+    private Boolean kafkaEnable;
 
     public void connect() {
         EventLoopGroup workerGroup = new NioEventLoopGroup();
@@ -58,20 +65,25 @@ public class NettyClient {
 
 
     public void sendMessage(NettyMessage msg) throws InterruptedException {
-        log.info("给云端发送数据:[{}]", msg);
-        if (channel.isWritable()) {
-            try {
-                //!!!!!!注意,NettyMessage<T>是有泛型的,如果把发送一个数据,接收方必须有一个和<T>的全限定类名完全一样的实体类,
-                //发送方不应该限制接收方,所有使用JSONObject.parseObject(msg.toString(), NettyMessage.class)操作让泛型擦除掉
-                channel.writeAndFlush(JSONObject.parseObject(msg.toString(), new TypeReference<NettyMessage<JSONObject>>(){}));
-            } catch (Exception e) {
-                log.error("发送数据异常,放入缓冲队列中", e);
+        if(kafkaEnable){
+            kafkaProducer.send(msg);
+        }else {
+            log.info("给云端发送数据:[{}]", msg);
+            if (channel.isWritable()) {
+                try {
+                    //!!!!!!注意,NettyMessage<T>是有泛型的,如果把发送一个数据,接收方必须有一个和<T>的全限定类名完全一样的实体类,
+                    //发送方不应该限制接收方,所有使用JSONObject.parseObject(msg.toString(), NettyMessage.class)操作让泛型擦除掉
+                    channel.writeAndFlush(JSONObject.parseObject(msg.toString(), new TypeReference<NettyMessage<JSONObject>>() {
+                    }));
+                } catch (Exception e) {
+                    log.error("发送数据异常,放入缓冲队列中", e);
+                    NettyMessageQueue.getNettyMessageQueue().produce(msg);
+                    channel.close();
+                }
+            } else {
+                log.warn("云端netty不可写,放入缓冲队列中[{}]", msg);
                 NettyMessageQueue.getNettyMessageQueue().produce(msg);
-                channel.close();
             }
-        } else {
-            log.warn("云端netty不可写,放入缓冲队列中[{}]", msg);
-            NettyMessageQueue.getNettyMessageQueue().produce(msg);
         }
     }
 

+ 2 - 0
src/main/resources/application.yml

@@ -49,6 +49,8 @@ alarm:
     all:
       alarmdefine:
         cron: 0 0 23 * * ?   # full refresh of all alarm definitions every day at 23:00
+  kafka:
+    enable: false
 # 配置日志相关的参考文档,https://github.com/dadiyang/autologging.git
 autolog:
   # 请填写应用名称,必填!