添加websocket功能

ds-bozhou
luyya 2025-05-26 15:43:30 +08:00
parent a2115294cb
commit 0ff37e767e
68 changed files with 2316 additions and 268 deletions

View File

@ -89,12 +89,12 @@
<id>prod</id>
<properties>
<profiles.active>prod</profiles.active>
<nacos.server>10.129.128.114:8848</nacos.server>
<nacos.server>53.176.146.99:8848</nacos.server>
<nacos.discovery.group>DEFAULT_GROUP</nacos.discovery.group>
<nacos.config.group>DEFAULT_GROUP</nacos.config.group>
<nacos.username>nacos</nacos.username>
<nacos.password>nacos</nacos.password>
<logstash.address>10.129.128.114:4560</logstash.address>
<logstash.address>53.176.146.99:4560</logstash.address>
</properties>
</profile>
</profiles>

View File

@ -14,6 +14,7 @@
<module>stwzhj-api-resource</module>
<module>stwzhj-api-workflow</module>
<module>stwzhj-api-data2es</module>
<module>stwzhj-api-location</module>
</modules>
<artifactId>stwzhj-api</artifactId>

View File

@ -47,6 +47,12 @@
<version>${revision}</version>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-api-location</artifactId>
<version>${revision}</version>
</dependency>
</dependencies>
</dependencyManagement>
</project>

View File

@ -0,0 +1,33 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-api</artifactId>
<version>${revision}</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>stwzhj-api-location</artifactId>
<description>
stwzhj-api-location
</description>
<dependencies>
<!-- stwzhj Common Core-->
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-core</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-excel</artifactId>
</dependency>
</dependencies>
</project>

View File

@ -0,0 +1,8 @@
package org.dromara.location.api;
import java.util.List;
/**
 * Remote service contract exposed by the location module for querying
 * data status from Elasticsearch.
 */
public interface RemoteElasticSearchService {
// NOTE(review): "linsten" looks like a typo for "listen"; renaming would break
// existing implementors/callers, so it is kept as-is — confirm before changing.
List<String> linstenDataStatus();
}

View File

@ -1,18 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="false">
<!-- 日志存放路径 -->
<property name="log.path" value="logs/${project.artifactId}"/>
<property name="log.path" value="logs/${project.artifactId}" />
<!-- 日志输出格式 -->
<property name="console.log.pattern"
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
<!-- 控制台输出 -->
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>
<!-- &lt;!&ndash; 控制台输出 &ndash;&gt;
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>-->
<include resource="logback-common.xml" />
@ -21,8 +21,66 @@
<!-- 开启 skywalking 日志收集 -->
<include resource="logback-skylog.xml" />
<!--系统操作日志-->
<root level="info">
<appender-ref ref="console"/>
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Info 级别的日志,只是过滤 info 还是会输出 Error 日志,因为 Error 的级别高,
所以我们使用下面的策略,可以避免输出 Error 的日志-->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<!--过滤 Error-->
<level>ERROR</level>
<!--匹配到就禁止-->
<onMatch>DENY</onMatch>
<!--没有匹配到就允许-->
<onMismatch>ACCEPT</onMismatch>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}${LOG_FILE}</File>
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_PATH}info/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</fileNamePattern>
<maxFileSize>50MB</maxFileSize>
<maxHistory>20</maxHistory> <!-- 保留20天 -->
</rollingPolicy>
</appender>
<!--error log-->
<appender name="ERRORFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Error 级别的日志,那么需要过滤一下,默认是 info 级别的ThresholdFilter-->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>Error</level>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}error.${LOG_FILE}</File>
<!--滚动策略,按照时间滚动 TimeBasedRollingPolicy-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!--文件路径,定义了日志的切分方式——把每一天的日志归档到一个文件中,以防止日志填满整个磁盘空间-->
<FileNamePattern>${LOG_PATH}error/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</FileNamePattern>
<!--只保留最近180天的日志-->
<maxFileSize>50MB</maxFileSize>
<maxHistory>180</maxHistory>
<!--用来指定日志文件的上限大小,那么到了这个值,就会删除旧的日志-->
<!--<totalSizeCap>1GB</totalSizeCap>-->
</rollingPolicy>
<!--日志输出编码格式化-->
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="FILE"/>
<appender-ref ref="ERRORFILE"/>
</root>
</configuration>

View File

@ -25,12 +25,12 @@ dubbo:
username: dubbo
password: ruoyi123
# 集群开关
sentinel: false
sentinel: true
parameters:
namespace: ${spring.profiles.active}
database: ${spring.data.redis.database}
timeout: ${spring.data.redis.timeout}
backup: 10.129.128.116:26380,10.129.128.115:26380,10.129.128.114:26380
backup: 53.176.146.98:26380,53.176.146.99:26380,53.176.146.100:26380
# metadata-report:
# address: redis://${spring.data.redis.host}:${spring.data.redis.port}
# group: DUBBO_GROUP

View File

@ -751,4 +751,12 @@ public class RedisUtils {
return count1;
}
/**
 * Finds the members of the online-users GEO set that lie within
 * {@code distance} meters of the given point.
 *
 * @param centerLon longitude of the search center
 * @param centerLat latitude of the search center
 * @param distance  search radius, in meters
 * @return member names located inside the radius
 */
public static List<String> nearByXYReadonly(double centerLon, double centerLat, double distance) {
    RGeo<String> onlineUsers = CLIENT.getGeo(RedisConstants.ONLINE_USERS_GEO);
    return onlineUsers.radius(centerLon, centerLat, distance, GeoUnit.METERS);
}
}

View File

@ -56,6 +56,11 @@ public class PlusWebSocketHandler extends AbstractWebSocketHandler {
WebSocketUtils.publishMessage(webSocketMessageDto);
}
/**
 * Handles an inbound text frame by echoing the payload straight back to the
 * same WebSocket session.
 *
 * @param session the session the message arrived on
 * @param message the raw text payload received from the client
 * @throws Exception if sending the echo fails
 */
protected void handleStringMessage(WebSocketSession session, String message) throws Exception {
// NOTE(review): the original comment claimed login-user info is read from the
// session, but the code only echoes the payload back — confirm intended behavior.
WebSocketUtils.sendMessage(session,message);
}
/**
*
*

View File

@ -2,6 +2,7 @@ package org.dromara.common.websocket.holder;
import lombok.AccessLevel;
import lombok.NoArgsConstructor;
import org.springframework.web.socket.CloseStatus;
import org.springframework.web.socket.WebSocketSession;
import java.util.Map;
@ -25,6 +26,7 @@ public class WebSocketSessionHolder {
* @param session WebSocket
*/
public static void addSession(Long sessionKey, WebSocketSession session) {
removeSession(sessionKey);
USER_SESSION_MAP.put(sessionKey, session);
}
@ -34,8 +36,10 @@ public class WebSocketSessionHolder {
* @param sessionKey
*/
public static void removeSession(Long sessionKey) {
if (USER_SESSION_MAP.containsKey(sessionKey)) {
USER_SESSION_MAP.remove(sessionKey);
WebSocketSession session = USER_SESSION_MAP.remove(sessionKey);
try {
session.close(CloseStatus.BAD_DATA);
} catch (Exception ignored) {
}
}

View File

@ -1,5 +1,7 @@
package org.dromara.gateway.filter;
import cn.dev33.satoken.stp.StpUtil;
import lombok.extern.slf4j.Slf4j;
import org.springframework.core.Ordered;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
@ -20,6 +22,7 @@ import reactor.core.publisher.Mono;
* @author Lion Li
*/
@Component
@Slf4j
public class GlobalCorsFilter implements WebFilter, Ordered {
/**
@ -60,8 +63,18 @@ public class GlobalCorsFilter implements WebFilter, Ordered {
@Override
public Mono<Void> filter(ServerWebExchange exchange, WebFilterChain chain) {
ServerHttpRequest request = exchange.getRequest();
// 判断请求是否为跨域请求
if (CorsUtils.isCorsRequest(request)) {
// 仅拦截 WebSocket 握手请求
/*if (isWebSocketHandshake(exchange.getRequest())) {
// 从请求中提取 Token支持 URL 参数或 Header
String token = extractToken(exchange.getRequest());
// 验证 Token 有效性
if (!validateToken(token)) {
log.info("webSocket认证过期Token={}", token);
return unauthorizedResponse(exchange, "Invalid token");
}
}else */if (CorsUtils.isCorsRequest(request)) { // 判断请求是否为跨域请求
ServerHttpResponse response = exchange.getResponse();
HttpHeaders headers = response.getHeaders();
headers.add("Access-Control-Allow-Headers", ALLOWED_HEADERS);
@ -83,4 +96,37 @@ public class GlobalCorsFilter implements WebFilter, Ordered {
public int getOrder() {
return Ordered.HIGHEST_PRECEDENCE;
}
/** Returns true when the request carries an "Upgrade: websocket" header, i.e. a WebSocket handshake. */
private boolean isWebSocketHandshake(ServerHttpRequest request) {
    return "websocket".equalsIgnoreCase(request.getHeaders().getFirst("Upgrade"));
}
/**
 * Extracts the auth token: prefers the "Authorization: Bearer ..." header,
 * falling back to the "token" URL query parameter. May return null.
 */
private String extractToken(ServerHttpRequest request) {
    String bearer = request.getHeaders().getFirst(HttpHeaders.AUTHORIZATION);
    if (bearer == null || !bearer.startsWith("Bearer ")) {
        // No usable header — fall back to ?token=...
        return request.getQueryParams().getFirst("token");
    }
    return bearer.substring("Bearer ".length());
}
// Token validation: token must be non-blank and still alive according to sa-token.
private boolean validateToken(String token) {
// Checks remaining TTL via sa-token. NOTE(review): StpUtil.getTokenTimeout is
// presumed to return -1 for never-expiring tokens; if so, "> 0" would reject
// them — verify against the sa-token documentation before relying on this.
return token != null && !token.isEmpty() && StpUtil.getTokenTimeout(token) > 0;
}
/**
 * Writes a 401 Unauthorized plain-text response with the given message.
 * The body is encoded as UTF-8 and the charset is declared in the
 * Content-Type header — the original used message.getBytes(), whose result
 * depends on the JVM's platform default charset.
 */
private Mono<Void> unauthorizedResponse(ServerWebExchange exchange, String message) {
    ServerHttpResponse response = exchange.getResponse();
    response.setStatusCode(HttpStatus.UNAUTHORIZED);
    response.getHeaders().add("Content-Type", "text/plain;charset=UTF-8");
    byte[] body = message.getBytes(java.nio.charset.StandardCharsets.UTF_8);
    return response.writeWith(Mono.just(response.bufferFactory().wrap(body)));
}
}

View File

@ -43,7 +43,6 @@ public class GlobalLogFilter implements GlobalFilter, Ordered {
ServerHttpRequest request = exchange.getRequest();
String path = WebFluxUtils.getOriginalRequestUrl(exchange);
String url = request.getMethod().name() + " " + path;
// 打印请求参数
if (WebFluxUtils.isJsonRequest(exchange)) {
if (apiDecryptProperties.getEnabled()

View File

@ -0,0 +1,65 @@
package org.dromara.gateway.filter;
import org.springframework.cloud.gateway.filter.GatewayFilterChain;
import org.springframework.cloud.gateway.filter.GlobalFilter;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.annotation.Order;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpStatus;
import org.springframework.http.server.reactive.ServerHttpRequest;
import org.springframework.stereotype.Component;
import org.springframework.web.server.ServerWebExchange;
import reactor.core.publisher.Mono;
// NOTE(review): registration annotations are commented out, so this filter is
// currently INACTIVE; an equivalent handshake check also sits (commented) in
// GlobalCorsFilter. Re-enable @Configuration/@Order here once one location is chosen.
//@Configuration
//@Order(-1) // high priority
public class WebSocketAuthFilter implements GlobalFilter {

    /**
     * Intercepts WebSocket handshake requests and rejects them with 401 when
     * no valid token is supplied. All non-WebSocket traffic passes through.
     */
    @Override
    public Mono<Void> filter(ServerWebExchange exchange, GatewayFilterChain chain) {
        if (isWebSocketHandshake(exchange.getRequest())) {
            // Token may arrive as "Authorization: Bearer ..." or as ?token=...
            String token = extractToken(exchange.getRequest());
            if (!validateToken(token)) {
                return unauthorizedResponse(exchange, "Invalid token");
            }
        }
        return chain.filter(exchange);
    }

    /** A request is a WebSocket handshake when it carries "Upgrade: websocket". */
    private boolean isWebSocketHandshake(ServerHttpRequest request) {
        return "websocket".equalsIgnoreCase(request.getHeaders().getFirst("Upgrade"));
    }

    /** Extracts the token, preferring the Authorization header over the "token" URL parameter. */
    private String extractToken(ServerHttpRequest request) {
        String authHeader = request.getHeaders().getFirst(HttpHeaders.AUTHORIZATION);
        if (authHeader != null && authHeader.startsWith("Bearer ")) {
            return authHeader.substring("Bearer ".length());
        }
        return request.getQueryParams().getFirst("token");
    }

    /**
     * Token validation.
     * NOTE(review): this only checks non-emptiness — ANY non-blank string passes.
     * Wire this to the auth service / JWT verification before enabling the filter.
     */
    private boolean validateToken(String token) {
        return token != null && !token.isEmpty();
    }

    /**
     * Writes a 401 plain-text response with the given message, encoded as UTF-8.
     * (Fixed: the original used message.getBytes(), which depends on the JVM's
     * platform default charset.)
     */
    private Mono<Void> unauthorizedResponse(ServerWebExchange exchange, String message) {
        exchange.getResponse().setStatusCode(HttpStatus.UNAUTHORIZED);
        exchange.getResponse().getHeaders().add("Content-Type", "text/plain;charset=UTF-8");
        return exchange.getResponse().writeWith(
            Mono.just(exchange.getResponse().bufferFactory().wrap(
                message.getBytes(java.nio.charset.StandardCharsets.UTF_8)))
        );
    }
}

View File

@ -1,114 +1,86 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="false">
<!-- 日志存放路径 -->
<property name="log.path" value="logs/${project.artifactId}"/>
<property name="log.path" value="logs/${project.artifactId}" />
<!-- 日志输出格式 -->
<property name="console.log.pattern"
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
<property name="log.pattern" value="%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{36} - %msg%n"/>
<!-- 控制台输出 -->
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>
<!-- &lt;!&ndash; 控制台输出 &ndash;&gt;
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>-->
<!-- 控制台输出 -->
<appender name="file_console" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/console.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- 日志文件名格式 -->
<fileNamePattern>${log.path}/console.%d{yyyy-MM-dd}.log</fileNamePattern>
<!-- 日志最大 1天 -->
<maxHistory>1</maxHistory>
</rollingPolicy>
<encoder>
<pattern>${log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<!-- 过滤的级别 -->
<level>INFO</level>
</filter>
</appender>
<!-- 系统日志输出 -->
<appender name="file_info" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/info.log</file>
<!-- 循环政策:基于时间创建日志文件 -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- 日志文件名格式 -->
<fileNamePattern>${log.path}/info.%d{yyyy-MM-dd}.log</fileNamePattern>
<!-- 日志最大的历史 60天 -->
<maxHistory>60</maxHistory>
</rollingPolicy>
<encoder>
<pattern>${log.pattern}</pattern>
</encoder>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<!-- 过滤的级别 -->
<level>INFO</level>
<!-- 匹配时的操作:接收(记录) -->
<onMatch>ACCEPT</onMatch>
<!-- 不匹配时的操作:拒绝(不记录) -->
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<appender name="file_error" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/error.log</file>
<!-- 循环政策:基于时间创建日志文件 -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- 日志文件名格式 -->
<fileNamePattern>${log.path}/error.%d{yyyy-MM-dd}.log</fileNamePattern>
<!-- 日志最大的历史 60天 -->
<maxHistory>60</maxHistory>
</rollingPolicy>
<encoder>
<pattern>${log.pattern}</pattern>
</encoder>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<!-- 过滤的级别 -->
<level>ERROR</level>
<!-- 匹配时的操作:接收(记录) -->
<onMatch>ACCEPT</onMatch>
<!-- 不匹配时的操作:拒绝(不记录) -->
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- info异步输出 -->
<appender name="async_info" class="ch.qos.logback.classic.AsyncAppender">
<!-- 不丢失日志.默认的,如果队列的80%已满,则会丢弃TRACT、DEBUG、INFO级别的日志 -->
<discardingThreshold>0</discardingThreshold>
<!-- 更改默认的队列的深度,该值会影响性能.默认值为256 -->
<queueSize>512</queueSize>
<!-- 添加附加的appender,最多只能添加一个 -->
<appender-ref ref="file_info"/>
</appender>
<!-- error异步输出 -->
<appender name="async_error" class="ch.qos.logback.classic.AsyncAppender">
<!-- 不丢失日志.默认的,如果队列的80%已满,则会丢弃TRACT、DEBUG、INFO级别的日志 -->
<discardingThreshold>0</discardingThreshold>
<!-- 更改默认的队列的深度,该值会影响性能.默认值为256 -->
<queueSize>512</queueSize>
<!-- 添加附加的appender,最多只能添加一个 -->
<appender-ref ref="file_error"/>
</appender>
<include resource="logback-common.xml" />
<include resource="logback-logstash.xml" />
<!-- 开启 skywalking 日志收集 -->
<include resource="logback-skylog.xml" />
<!--系统操作日志-->
<root level="info">
<appender-ref ref="console"/>
<appender-ref ref="async_info"/>
<appender-ref ref="async_error"/>
<appender-ref ref="file_console"/>
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Info 级别的日志,只是过滤 info 还是会输出 Error 日志,因为 Error 的级别高,
所以我们使用下面的策略,可以避免输出 Error 的日志-->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<!--过滤 Error-->
<level>ERROR</level>
<!--匹配到就禁止-->
<onMatch>DENY</onMatch>
<!--没有匹配到就允许-->
<onMismatch>ACCEPT</onMismatch>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}${LOG_FILE}</File>
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_PATH}info/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</fileNamePattern>
<maxFileSize>50MB</maxFileSize>
<maxHistory>20</maxHistory> <!-- 保留20天 -->
</rollingPolicy>
</appender>
<!--error log-->
<appender name="ERRORFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Error 级别的日志,那么需要过滤一下,默认是 info 级别的ThresholdFilter-->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>Error</level>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}error.${LOG_FILE}</File>
<!--滚动策略,按照时间滚动 TimeBasedRollingPolicy-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!--文件路径,定义了日志的切分方式——把每一天的日志归档到一个文件中,以防止日志填满整个磁盘空间-->
<FileNamePattern>${LOG_PATH}error/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</FileNamePattern>
<!--只保留最近180天的日志-->
<maxFileSize>50MB</maxFileSize>
<maxHistory>180</maxHistory>
<!--用来指定日志文件的上限大小,那么到了这个值,就会删除旧的日志-->
<!--<totalSizeCap>1GB</totalSizeCap>-->
</rollingPolicy>
<!--日志输出编码格式化-->
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="FILE"/>
<appender-ref ref="ERRORFILE"/>
</root>
</configuration>

View File

@ -19,6 +19,7 @@
<module>stwzhj-consumer</module>
<module>stwzhj-location</module>
<module>stwzhj-dataToGas</module>
<module>stwzhj-kafkaToWebsocket</module>
<module>wzhj-webscoket</module>
<module>wzhj-extract</module>
<module>wzhj-udp</module>

View File

@ -52,14 +52,27 @@ public class ConsumerWorker {
"auto.offset.reset:latest"})
public void consumer(ConsumerRecord<String,Object> record) {
Object value = record.value();
String topic = record.topic();
EsGpsInfo esGpsInfo = JSONUtil.toBean((String) value, EsGpsInfo.class);
Date gpsTime = esGpsInfo.getGpsTime();
// log.info("value={}",value);
if(Objects.isNull(gpsTime)){
log.error("gpsTime == null,deviceCode={}",esGpsInfo.getDeviceCode());
return;
}
String deviceType = esGpsInfo.getDeviceType();
if(StringUtils.isBlank(deviceType)){
log.error("deviceType is null, deviceCode={}",esGpsInfo.getDeviceCode());
return;
}
if(DateUtil.between(gpsTime,new Date(), DateUnit.MINUTE) < 30){
esGpsInfo.setOnline(1);
}
if ("jysb_dwxx".equals(topic)){ //定位信息
// logger.info("offset={},topic={},value={}", record.offset(), topic,value);
luanrequest(value);
} else if ("jysb_sbxx".equals(topic)) { //基础信息
// logger.info("offset={},topic={},value={}", record.offset(), topic,value);
baseDataRequest(value);
logger.info("esGpsInfo={}",esGpsInfo);
boolean offer = linkedBlockingDeque.offer(esGpsInfo);
R response = R.ok(offer);
if(Objects.isNull(response)){
logger.info("response == null");
}

View File

@ -40,7 +40,7 @@ public class DataInsertBatchHandler implements CommandLineRunner {
public void run(String... args) throws Exception {
ExecutorService singleThreadExecutor = Executors.newSingleThreadExecutor();
LinkedBlockingDeque linkedBlockingDeque = ConsumerWorker.linkedBlockingDeque; //定位信息队列
LinkedBlockingDeque baseDataDeque = ConsumerWorker.basedataDeque; //基础信息队列
// LinkedBlockingDeque baseDataDeque = ConsumerWorker.basedataDeque; //基础信息队列
singleThreadExecutor.execute(new Runnable() {
@Override
public void run() {
@ -49,15 +49,15 @@ public class DataInsertBatchHandler implements CommandLineRunner {
List<RemoteGpsInfo> list = new ArrayList<>();
List<RemoteDeviceBo> bases = new ArrayList<>();
Queues.drain(linkedBlockingDeque, list, 200, 5, TimeUnit.SECONDS);
Queues.drain(baseDataDeque, bases, 100, 5, TimeUnit.SECONDS);
// Queues.drain(baseDataDeque, bases, 100, 5, TimeUnit.SECONDS);
log.info("batch size={}", list.size());
log.info("basedata size={}", bases.size());
// log.info("basedata size={}", bases.size());
if(CollectionUtil.isNotEmpty(list)) {
gpsService.saveDataBatch(list);
}
if(CollectionUtil.isNotEmpty(bases)) {
/*if(CollectionUtil.isNotEmpty(bases)) {
deviceService.batchSaveDevice(bases);
}
}*/
} catch (Exception e) {
log.error("缓存队列批量消费异常:{}", e.getMessage());
}

View File

@ -6,13 +6,13 @@
<property name="console.log.pattern"
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
<!-- 控制台输出 -->
<!-- &lt;!&ndash; 控制台输出 &ndash;&gt;
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>
</appender>-->
<include resource="logback-common.xml" />
@ -21,8 +21,66 @@
<!-- 开启 skywalking 日志收集 -->
<include resource="logback-skylog.xml" />
<!--系统操作日志-->
<root level="info">
<appender-ref ref="console" />
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Info 级别的日志,只是过滤 info 还是会输出 Error 日志,因为 Error 的级别高,
所以我们使用下面的策略,可以避免输出 Error 的日志-->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<!--过滤 Error-->
<level>ERROR</level>
<!--匹配到就禁止-->
<onMatch>DENY</onMatch>
<!--没有匹配到就允许-->
<onMismatch>ACCEPT</onMismatch>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}${LOG_FILE}</File>
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_PATH}info/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</fileNamePattern>
<maxFileSize>50MB</maxFileSize>
<maxHistory>20</maxHistory> <!-- 保留20天 -->
</rollingPolicy>
</appender>
<!--error log-->
<appender name="ERRORFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Error 级别的日志,那么需要过滤一下,默认是 info 级别的ThresholdFilter-->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>Error</level>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}error.${LOG_FILE}</File>
<!--滚动策略,按照时间滚动 TimeBasedRollingPolicy-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!--文件路径,定义了日志的切分方式——把每一天的日志归档到一个文件中,以防止日志填满整个磁盘空间-->
<FileNamePattern>${LOG_PATH}error/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</FileNamePattern>
<!--只保留最近180天的日志-->
<maxFileSize>50MB</maxFileSize>
<maxHistory>180</maxHistory>
<!--用来指定日志文件的上限大小,那么到了这个值,就会删除旧的日志-->
<!--<totalSizeCap>1GB</totalSizeCap>-->
</rollingPolicy>
<!--日志输出编码格式化-->
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="FILE"/>
<appender-ref ref="ERRORFILE"/>
</root>
</configuration>

View File

@ -53,7 +53,7 @@ public class RedisExpireListener extends KeyExpirationEventMessageListener {
public void onMessage(Message message, byte[] pattern) {
String expireKey = message.toString();
if(StringUtils.isNotEmpty(expireKey) &&
expireKey.startsWith(RedisConstants.ORG_CODE_PRE)){
expireKey.startsWith(RedisConstants.ONLINE_USERS_TEN)){
handleExpiredEvent(expireKey);
}
}
@ -85,7 +85,10 @@ public class RedisExpireListener extends KeyExpirationEventMessageListener {
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
lock.unlock();
// 仅在当前线程持有锁时释放
if (lock.isHeldByCurrentThread()) {
lock.unlock();
}
}
}

View File

@ -1,18 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="false">
<!-- 日志存放路径 -->
<property name="log.path" value="logs/${project.artifactId}" />
<!-- 日志输出格式 -->
<property name="log.path" value="logs/${project.artifactId}" />
<!-- 日志输出格式 -->
<property name="console.log.pattern"
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
<!-- 控制台输出 -->
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>
<!-- &lt;!&ndash; 控制台输出 &ndash;&gt;
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>-->
<include resource="logback-common.xml" />
@ -21,8 +21,66 @@
<!-- 开启 skywalking 日志收集 -->
<include resource="logback-skylog.xml" />
<!--系统操作日志-->
<root level="info">
<appender-ref ref="console" />
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Info 级别的日志,只是过滤 info 还是会输出 Error 日志,因为 Error 的级别高,
所以我们使用下面的策略,可以避免输出 Error 的日志-->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<!--过滤 Error-->
<level>ERROR</level>
<!--匹配到就禁止-->
<onMatch>DENY</onMatch>
<!--没有匹配到就允许-->
<onMismatch>ACCEPT</onMismatch>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}${LOG_FILE}</File>
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_PATH}info/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</fileNamePattern>
<maxFileSize>50MB</maxFileSize>
<maxHistory>20</maxHistory> <!-- 保留20天 -->
</rollingPolicy>
</appender>
<!--error log-->
<appender name="ERRORFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Error 级别的日志,那么需要过滤一下,默认是 info 级别的ThresholdFilter-->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>Error</level>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}error.${LOG_FILE}</File>
<!--滚动策略,按照时间滚动 TimeBasedRollingPolicy-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!--文件路径,定义了日志的切分方式——把每一天的日志归档到一个文件中,以防止日志填满整个磁盘空间-->
<FileNamePattern>${LOG_PATH}error/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</FileNamePattern>
<!--只保留最近180天的日志-->
<maxFileSize>50MB</maxFileSize>
<maxHistory>180</maxHistory>
<!--用来指定日志文件的上限大小,那么到了这个值,就会删除旧的日志-->
<!--<totalSizeCap>1GB</totalSizeCap>-->
</rollingPolicy>
<!--日志输出编码格式化-->
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="FILE"/>
<appender-ref ref="ERRORFILE"/>
</root>
</configuration>

View File

@ -0,0 +1,139 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-modules</artifactId>
<version>${revision}</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>stwzhj-kafkaToWebsocket</artifactId>
<description>
stwzhj-kafkaToWebsocket kafka消息发送到Websocket
</description>
<dependencies>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-nacos</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-sentinel</artifactId>
</dependency>
<!-- RuoYi Common Log -->
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-log</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-dict</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-doc</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-web</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-dubbo</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-seata</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-idempotent</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-tenant</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-security</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-translation</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-sensitive</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-encrypt</artifactId>
</dependency>
<!-- RuoYi Api System -->
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-api-system</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-api-resource</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-api-location</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-websocket</artifactId>
</dependency>
<!-- kafka -->
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
</dependency>
</dependencies>
<build>
<finalName>${project.artifactId}</finalName>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<version>${spring-boot.version}</version>
<executions>
<execution>
<goals>
<goal>repackage</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -0,0 +1,26 @@
package org.dromara.kafka2Websocket;
import org.apache.dubbo.config.spring.context.annotation.EnableDubbo;
import org.dromara.kafka2Websocket.config.KafkaProperties;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.context.metrics.buffering.BufferingApplicationStartup;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.scheduling.annotation.EnableScheduling;
@EnableDubbo
@EnableScheduling
@SpringBootApplication
@EnableConfigurationProperties(KafkaProperties.class)
public class KafkaToSocketApplication {

    /**
     * Boot entry point for the kafka-to-websocket bridge service.
     * Startup-step buffering (capacity 2048) is enabled for diagnostics.
     */
    public static void main(String[] args) {
        SpringApplication app = new SpringApplication(KafkaToSocketApplication.class);
        app.setApplicationStartup(new BufferingApplicationStartup(2048));
        app.run(args);
        System.out.println("(♥◠‿◠)ノ゙ Socket启动成功 ლ(´ڡ`ლ)゙ ");
    }
}

View File

@ -0,0 +1,66 @@
package org.dromara.kafka2Websocket.config;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ConsumerAwareListenerErrorHandler;
import org.springframework.kafka.listener.ContainerProperties;
import java.util.HashMap;
import java.util.Map;
// 1. Kafka listener infrastructure configuration.
@Configuration
@Slf4j
public class KafkaConfig {

    /**
     * Consumer factory built from the externalized {@code app.kafka} properties.
     * Auto-commit is disabled; the listener container commits offsets itself.
     */
    @Bean
    public ConsumerFactory<String, String> consumerFactory(KafkaProperties properties) {
        Map<String, Object> consumerProps = new HashMap<>();
        consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, properties.getBootstrapServers());
        consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, properties.getGroupId());
        consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        return new DefaultKafkaConsumerFactory<>(consumerProps);
    }

    /**
     * Listener container factory: batch listening with three concurrent consumers.
     * Ack mode is left at the Spring Kafka default (BATCH); switch to MANUAL here
     * if listeners ever need to acknowledge explicitly.
     */
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory(
            ConsumerFactory<String, String> consumerFactory) {
        ConcurrentKafkaListenerContainerFactory<String, String> containerFactory =
            new ConcurrentKafkaListenerContainerFactory<>();
        containerFactory.setConsumerFactory(consumerFactory);
        containerFactory.setBatchListener(true);
        containerFactory.setConcurrency(3);
        return containerFactory;
    }

    /**
     * Error handler for {@code @KafkaListener} methods: logs the failed message
     * and swallows the exception (returns null, no retry).
     *
     * @return the shared listener error handler bean
     */
    @Bean
    public ConsumerAwareListenerErrorHandler consumerAwareListenerErrorHandler() {
        return (message, exception, consumer) -> {
            log.error("消息{} , 异常原因{}", message, exception.getMessage());
            log.error("consumerAwareListenerErrorHandler called");
            return null;
        };
    }
}

View File

@ -0,0 +1,17 @@
package org.dromara.kafka2Websocket.config;
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import java.util.List;
// 2. Typed binding for the "app.kafka.*" configuration section.
@Data
@ConfigurationProperties(prefix = "app.kafka")
public class KafkaProperties {
// Comma-separated Kafka broker addresses (bootstrap.servers).
private String bootstrapServers;
// Topics the bridge subscribes to.
private List<String> topics;
// Consumer group id.
private String groupId;
// Desired listener concurrency.
// NOTE(review): KafkaConfig hard-codes setConcurrency(3) and ignores this field — confirm which should win.
private int concurrency = 3;
}

View File

@ -0,0 +1,58 @@
package org.dromara.kafka2Websocket.config;
import cn.hutool.core.util.StrUtil;
import jakarta.websocket.OnOpen;
import org.dromara.kafka2Websocket.dto.SharedState;
import org.dromara.kafka2Websocket.handle.KafkaWebSocketHandler;
import org.dromara.kafka2Websocket.interceptor.AuthInterceptor;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.socket.WebSocketHandler;
import org.springframework.web.socket.config.annotation.EnableWebSocket;
import org.springframework.web.socket.config.annotation.WebSocketConfigurer;
import org.springframework.web.socket.server.HandshakeInterceptor;
// 3. WebSocket configuration: registers the handler, handshake interceptor and allowed origins.
@Configuration
@EnableWebSocket
public class WebSocketConfig {

    /**
     * Registers the single WebSocket endpoint at "ws/websocket".
     * <p>
     * The path and the open origin policy ("*") are hard-coded here.
     * NOTE(review): consider binding them via WebSocketProperties (currently
     * unused) instead of literals.
     *
     * @param handshakeInterceptor handshake-time auth hook
     * @param webSocketHandler     message handler for established sessions
     * @return registry callback executed by Spring's WebSocket bootstrap
     */
    @Bean
    public WebSocketConfigurer webSocketConfigurer(HandshakeInterceptor handshakeInterceptor,
                                                   WebSocketHandler webSocketHandler) {
        return registry -> registry
            .addHandler(webSocketHandler, "ws/websocket")
            .addInterceptors(handshakeInterceptor)
            .setAllowedOrigins("*");
    }

    /** Handshake interceptor bean (authentication hook). */
    @Bean
    public HandshakeInterceptor handshakeInterceptor() {
        return new AuthInterceptor();
    }

    /** Handler bean that bridges Kafka messages to connected clients. */
    @Bean
    public WebSocketHandler webSocketHandler() {
        return new KafkaWebSocketHandler();
    }
}

View File

@ -0,0 +1,26 @@
package org.dromara.kafka2Websocket.config;
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
/**
 * WebSocket configuration properties.
 * <p>
 * NOTE(review): the {@code @ConfigurationProperties} annotation is commented out,
 * so this class is currently not bound to any configuration and appears unused —
 * confirm whether it should be re-enabled or removed.
 *
 * @author zendwang
 */
//@ConfigurationProperties("websocket")
@Data
public class WebSocketProperties {
// Whether the WebSocket endpoint is enabled.
private Boolean enabled;
/**
 * Endpoint path the handler is registered under.
 */
private String path;
/**
 * Allowed CORS origins for the WebSocket handshake.
 */
private String allowedOrigins;
}

View File

@ -0,0 +1,113 @@
package org.dromara.kafka2Websocket.consumer;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.dromara.common.satoken.utils.LoginHelper;
import org.dromara.kafka2Websocket.config.KafkaProperties;
import org.dromara.kafka2Websocket.handle.KafkaWebSocketHandler;
import org.dromara.system.api.model.LoginUser;
import org.springframework.context.SmartLifecycle;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.KafkaTemplate;
import java.time.Duration;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;
// Manually managed Kafka consumer driven by the Spring lifecycle.
// NOTE(review): @Configuration/@Slf4j are commented out, so this class is NOT
// registered as a bean — KafkaConsumerService is the active consumer. Confirm
// whether this class should be deleted or re-enabled.
//@Configuration
//@Slf4j
public class KafkaConsumerManager implements SmartLifecycle {
// lifecycle guard: true between start() and stop()
private final AtomicBoolean running = new AtomicBoolean(false);
private final KafkaProperties properties;
private final KafkaWebSocketHandler webSocketHandler;
private final KafkaTemplate<String, String> kafkaTemplate;
private KafkaConsumer<String, String> consumer;
private Thread consumerThread;
public KafkaConsumerManager(KafkaProperties properties,
KafkaWebSocketHandler webSocketHandler,
KafkaTemplate<String, String> kafkaTemplate) {
this.properties = properties;
this.webSocketHandler = webSocketHandler;
this.kafkaTemplate = kafkaTemplate;
}
// Start the single consumer thread exactly once.
@Override
public void start() {
if (running.compareAndSet(false, true)) {
initializeConsumer();
consumerThread = new Thread(this::consumeMessages);
consumerThread.start();
}
}
// Build and subscribe the raw KafkaConsumer (manual offset commits, earliest reset).
private void initializeConsumer() {
Properties props = new Properties();
props.put("bootstrap.servers", properties.getBootstrapServers());
props.put("group.id", properties.getGroupId());
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("enable.auto.commit", "false");
props.put("auto.offset.reset", "earliest");
consumer = new KafkaConsumer<>(props);
consumer.subscribe(properties.getTopics());
}
// Poll loop: forward each batch to WebSocket clients, then commit offsets.
// NOTE(review): `while (true)` ignores the `running` flag (the guarded condition
// was commented out); shutdown relies solely on consumer.wakeup() raising
// WakeupException into this loop.
// NOTE(review): the catch below swallows ALL exceptions silently — at minimum log them.
private void consumeMessages() {
try {
while (true) {
ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
processRecords(records);
consumer.commitSync();
}
} catch (Exception e) {
// swallowed — see NOTE(review) above
} finally {
consumer.close();
}
}
// Broadcast each record, routing failures to a dead-letter topic.
// NOTE(review): LoginHelper.getLoginUser() executes on this background consumer
// thread, where no authenticated request context exists — this likely throws;
// confirm before re-enabling the class.
private void processRecords(ConsumerRecords<String, String> records) {
LoginUser user = LoginHelper.getLoginUser();
records.forEach(record -> {
try {
webSocketHandler.broadcast(record.value(), user.getManageDeptId());
} catch (Exception e) {
handleFailedMessage(record);
}
});
}
// Wraps a payload with its topic as JSON. Currently unused (call site commented out).
private String formatMessage(String topic, String payload) {
return String.format("{\"topic\":\"%s\",\"data\":%s}", topic, payload);
}
// Route a failed record to the "dlq-<topic>" dead-letter topic.
private void handleFailedMessage(ConsumerRecord<String, String> record) {
kafkaTemplate.send("dlq-" + record.topic(), record.value());
}
// Wake the consumer out of poll() and wait (up to 5s) for the thread to exit.
@Override
public void stop() {
if (running.compareAndSet(true, false)) {
if (consumer != null) consumer.wakeup();
if (consumerThread != null) {
try {
consumerThread.join(5000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
}
}
@Override
public boolean isRunning() {
return running.get();
}
}

View File

@ -0,0 +1,39 @@
package org.dromara.kafka2Websocket.consumer;
import lombok.extern.slf4j.Slf4j;
import org.dromara.kafka2Websocket.dto.SharedState;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Service;
import java.util.List;
@Service
@Slf4j
public class KafkaConsumerService {
@Autowired
private SharedState state;
@Autowired
private MessageBufferManager bufferManager;
@KafkaListener(topics = "${spring.kafka.topics}")
public void processMessage(List<String> records) {
log.error("flag的值={}",state.getFlag());
try {
records.stream().forEach(record -> {
if (state.getFlag()){
bufferManager.bufferMessage("3413",record);
}
});
} catch (Exception e){
e.printStackTrace();
}
}
}

View File

@ -0,0 +1,71 @@
package org.dromara.kafka2Websocket.consumer;
import com.alibaba.nacos.shaded.com.google.gson.Gson;
import jakarta.websocket.Session;
import org.dromara.kafka2Websocket.holder.WebSocketSessionHolder;
import org.dromara.kafka2Websocket.utils.SocketUtils;
import org.springframework.scheduling.annotation.Async;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import org.springframework.web.socket.WebSocketSession;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.*;
// Buffers inbound Kafka messages per key and flushes them to WebSocket clients
// in batches, triggered either by size threshold or by time window.
@Component
public class MessageBufferManager {
// Flush a queue immediately once it reaches this many messages.
private static final int BATCH_SIZE = 500;
// Scheduled flush interval in milliseconds (time-window trigger).
private static final long BUFFER_TIME_MS = 100;
// Buffer key (nominally a user id) -> pending messages (thread-safe).
private final ConcurrentHashMap<String, LinkedBlockingQueue<String>> userMessageQueues = new ConcurrentHashMap<>();
// Append a message to the key's buffer; flush immediately when BATCH_SIZE is reached.
public void bufferMessage(String userId, String message) {
LinkedBlockingQueue<String> queue = userMessageQueues.computeIfAbsent(userId, k -> new LinkedBlockingQueue<>());
queue.offer(message);
// Size-based trigger (the @Scheduled task below is the time-based one).
if (queue.size() >= BATCH_SIZE) {
sendBufferedMessages(userId);
}
}
// Time-window flush of every buffer.
// NOTE(review): emptied keys are never removed from userMessageQueues, so the map
// grows with the number of distinct keys — confirm whether eviction is needed.
@Scheduled(fixedRate = BUFFER_TIME_MS)
@Async
public void flushBufferedMessages() {
userMessageQueues.keySet().forEach(this::sendBufferedMessages);
}
// Drain one buffer and deliver its merged contents.
// NOTE(review): the merged batch is sent to ALL online sessions, not only to
// sessions belonging to `userId` (see the commented-out per-user variant below) —
// confirm the broadcast is intentional.
private void sendBufferedMessages(String userId) {
LinkedBlockingQueue<String> queue = userMessageQueues.get(userId);
if (queue == null || queue.isEmpty()) return;
List<String> messages = new ArrayList<>();
queue.drainTo(messages); // drains and empties the queue without losing messages
if (!messages.isEmpty()) {
String mergedMessage = mergeMessages(messages);
Map<String, CopyOnWriteArraySet<WebSocketSession>> allOnlineSessions = WebSocketSessionHolder.getAllOnlineSessions();
allOnlineSessions.forEach((key, value) -> {
SocketUtils.sendMessage(key,mergedMessage);
});
/*WebSocketSessionHolder.getSessions(userId).forEach(session -> {
if (session.isOpen()) {
SocketUtils.sendMessage(session,mergedMessage);
}
});*/
}
}
// Wrap the batch as a single {"type":"batch","data":[...]} JSON envelope.
private String mergeMessages(List<String> messages) {
return "{\"type\":\"batch\", \"data\":" + new Gson().toJson(messages) + "}";
}
}

View File

@ -0,0 +1,33 @@
package org.dromara.kafka2Websocket.dto;
import org.springframework.stereotype.Component;
import java.util.concurrent.atomic.AtomicBoolean;
@Component
public class SharedState {
// Thread-safe on/off switch: set true while at least one WebSocket client is
// connected (written by KafkaWebSocketHandler, read by KafkaConsumerService).
private final AtomicBoolean flag = new AtomicBoolean(false);
// Overwrite the flag unconditionally (atomic).
public void setFlag(boolean value) {
flag.set(value);
}
// Read the current value.
public boolean getFlag() {
return flag.get();
}
// Atomically flip false -> true; returns true only for the winning caller (lock-acquire style).
public boolean tryActivate() {
return flag.compareAndSet(false, true);
}
// Atomically flip true -> false; returns true only if the flag was previously set.
public boolean tryActivateFalse() {
return flag.compareAndSet(true, false);
}
}

View File

@ -0,0 +1,29 @@
package org.dromara.kafka2Websocket.dto;
import lombok.Data;
import java.io.Serial;
import java.io.Serializable;
import java.util.List;
/**
 * Payload exchanged over the Redis "global:websocket" pub/sub channel
 * (see SocketUtils.publishMessage / publishAll).
 *
 * @author zendwang
 */
@Data
public class WebSocketMessageDto implements Serializable {
@Serial
private static final long serialVersionUID = 1L;
/**
 * Session keys of the intended recipients; left unset by publishAll broadcasts.
 */
private List<Long> sessionKeys;
/**
 * Text message body to deliver.
 */
private String message;
}

View File

@ -0,0 +1,113 @@
package org.dromara.kafka2Websocket.handle;
import cn.dev33.satoken.session.SaSession;
import cn.dev33.satoken.stp.StpUtil;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import jakarta.annotation.PreDestroy;
import lombok.extern.slf4j.Slf4j;
import org.dromara.common.satoken.utils.LoginHelper;
import org.dromara.kafka2Websocket.dto.SharedState;
import org.dromara.kafka2Websocket.holder.WebSocketSessionHolder;
import org.dromara.system.api.model.LoginUser;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.kafka.listener.MessageListenerContainer;
import org.springframework.web.socket.CloseStatus;
import org.springframework.web.socket.TextMessage;
import org.springframework.web.socket.WebSocketSession;
import org.springframework.web.socket.handler.TextWebSocketHandler;
import java.io.IOException;
import java.util.Collections;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import static org.dromara.common.satoken.utils.LoginHelper.LOGIN_USER_KEY;
// 4. WebSocket handler: tracks sessions and pushes Kafka messages to clients.
// NOTE(review): annotated @Configuration; @Component is the conventional
// stereotype for a handler bean (behavior is equivalent here).
@Configuration
@Slf4j
public class KafkaWebSocketHandler extends TextWebSocketHandler {
// session id -> session (this handler's own registry, mirrored in WebSocketSessionHolder)
private final ConcurrentHashMap<String, WebSocketSession> sessions = new ConcurrentHashMap<>();
// live connection counter driving the consume on/off flag
private final AtomicInteger connectionCount = new AtomicInteger(0);
@Autowired
private SharedState state;
// Register the new session and enable Kafka consumption while anyone is online.
@Override
public void afterConnectionEstablished(WebSocketSession session) {
sessions.put(session.getId(), session);
WebSocketSessionHolder.addSession(session.getId(),session);
connectionCount.incrementAndGet();
if(connectionCount.get() >0){ // at least one client online: resume consumption
state.setFlag(true);
log.info("resume over ");
}
log.info("连接建立: {} 当前连接数: {}", session.getId(), sessions.size());
}
// Deregister the session; pause consumption when the last client leaves.
@Override
public void afterConnectionClosed(WebSocketSession session, CloseStatus status) {
sessions.remove(session.getId());
WebSocketSessionHolder.removeSession(session.getId());
connectionCount.decrementAndGet();
if(connectionCount.get() ==0){
// no clients left: pause consumption
state.setFlag(false);
log.info("pause");
}
log.info("连接关闭: {} 剩余连接数: {}", session.getId(), sessions.size());
}
// Push one message to every open session, filtered per-session by department code.
public void broadcast(String message,String deptId) {
sessions.values().parallelStream()
.filter(WebSocketSession::isOpen)
.forEach(session -> sendMessage(session, deptId, message));
}
// Deliver `message` to `session` when the message's "zzjgdm" (org code) matches deptId.
// NOTE(review): the JSON is re-parsed once per recipient session — parse once in broadcast().
// NOTE(review): throws NPE if "zzjgdm" is missing or deptId is null — confirm upstream guarantees.
// NOTE(review): when the first 6 chars match, the 6-char branch fires, making the
// 8-char ("支队") branch below unreachable — verify the intended routing order.
private void sendMessage(WebSocketSession session, String deptId, String message) {
try {
synchronized (session) { // serialize concurrent writes to the same session
JSONObject job = JSON.parseObject(message);
String zzjgdm = job.getString("zzjgdm");
if (deptId.endsWith("00000000")){ // city-level (市局) login: no filtering
session.sendMessage(new TextMessage(message));
} else if (zzjgdm.substring(0,6).equals(deptId.substring(0,6))) { // branch-level (分局): match first 6 chars
session.sendMessage(new TextMessage(message));
}else if (zzjgdm.substring(0,8).equals(deptId.substring(0,8))) { // detachment-level (支队): match first 8 chars
session.sendMessage(new TextMessage(message));
}
}
} catch (IOException e) {
// NOTE(review): send failure is silently swallowed — consider logging and closing the session.
}
}
// True while any client is connected.
public boolean hasConnections() {
return connectionCount.get() > 0;
}
// Close all sessions on shutdown.
@PreDestroy
public void cleanUp() {
sessions.values().forEach(session -> {
try {
if (session.isOpen()) session.close();
} catch (IOException e) {
// best-effort close on shutdown; ignore failures
}
});
}
}

View File

@ -0,0 +1,116 @@
package org.dromara.kafka2Websocket.holder;
import jakarta.websocket.Session;
import lombok.AccessLevel;
import lombok.NoArgsConstructor;
import org.springframework.web.socket.CloseStatus;
import org.springframework.web.socket.WebSocketSession;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArraySet;
/**
 * Thread-safe registry of active {@link WebSocketSession} instances.
 * <p>
 * Two views are kept in sync:
 * <ul>
 *   <li>{@code USER_SESSION_MAP}: key -&gt; most recently registered session</li>
 *   <li>{@code userSessions}: key -&gt; all sessions (multi-device view)</li>
 * </ul>
 * Keys are the String session ids supplied by the handler.
 *
 * @author zendwang
 */
@NoArgsConstructor(access = AccessLevel.PRIVATE)
public class WebSocketSessionHolder {

    /** Key -> most recently registered session. */
    private static final Map<String, WebSocketSession> USER_SESSION_MAP = new ConcurrentHashMap<>();

    /** Key -> every session registered under it (supports multiple devices). */
    private static final ConcurrentHashMap<String, CopyOnWriteArraySet<WebSocketSession>> userSessions = new ConcurrentHashMap<>();

    /**
     * Registers a session, first evicting (and closing) any previous session
     * held under the same key.
     *
     * @param sessionKey registry key (session id)
     * @param session    the WebSocket session to store
     */
    public static void addSession(String sessionKey, WebSocketSession session) {
        removeSession(sessionKey);
        userSessions.computeIfAbsent(sessionKey, k -> new CopyOnWriteArraySet<>()).add(session);
        USER_SESSION_MAP.put(sessionKey, session);
    }

    /**
     * Removes and closes the session registered under the given key.
     * Safe to call for keys that were never registered (e.g. from addSession).
     *
     * @param sessionKey registry key (session id)
     */
    public static void removeSession(String sessionKey) {
        WebSocketSession session = USER_SESSION_MAP.remove(sessionKey);
        CopyOnWriteArraySet<WebSocketSession> sessions = userSessions.get(sessionKey);
        if (sessions != null) {
            sessions.remove(session);
            if (sessions.isEmpty()) {
                userSessions.remove(sessionKey);
            }
        }
        if (session == null) {
            // Nothing was registered under this key; previously this fell through
            // to session.close() and relied on the catch swallowing the NPE.
            return;
        }
        try {
            session.close(CloseStatus.BAD_DATA);
        } catch (Exception ignored) {
            // best-effort close
        }
    }

    /**
     * Looks up the single session registered under a numeric key.
     * <p>
     * Entries are stored under String keys, so the numeric key is converted via
     * {@link String#valueOf(Object)} — previously a Long was passed straight to
     * the String-keyed map and the lookup could never match.
     *
     * @param sessionKey numeric registry key
     * @return the session, or {@code null} if none is registered
     */
    public static WebSocketSession getSessions(Long sessionKey) {
        return USER_SESSION_MAP.get(String.valueOf(sessionKey));
    }

    /**
     * Returns all live sessions for a key, pruning any that have closed.
     *
     * @param userId registry key
     * @return possibly-empty set of open sessions (never {@code null})
     */
    public static CopyOnWriteArraySet<WebSocketSession> getSessions(String userId) {
        CopyOnWriteArraySet<WebSocketSession> sessions = userSessions.get(userId);
        if (sessions == null) return new CopyOnWriteArraySet<>();
        // Drop already-closed sessions before handing the set out.
        sessions.removeIf(session -> !session.isOpen());
        return sessions;
    }

    /**
     * @return all registered keys in the single-session view
     */
    public static Set<String> getSessionsAll() {
        return USER_SESSION_MAP.keySet();
    }

    /**
     * Checks whether a session is registered under the given numeric key.
     * Converts the key to String to match the stored keys (previously compared
     * a Long against String keys and always returned {@code false}).
     *
     * @param sessionKey numeric registry key
     * @return {@code true} if a session exists for the key
     */
    public static Boolean existSession(Long sessionKey) {
        return USER_SESSION_MAP.containsKey(String.valueOf(sessionKey));
    }

    /** @return keys that still have at least one open session */
    public static List<String> getAllOnlineUserIds() {
        List<String> userIds = new ArrayList<>(userSessions.keySet());
        userIds.removeIf(userId -> getSessions(userId).isEmpty());
        return Collections.unmodifiableList(userIds);
    }

    /** @return snapshot of every key with its open sessions (closed ones pruned) */
    public static Map<String, CopyOnWriteArraySet<WebSocketSession>> getAllOnlineSessions() {
        Map<String, CopyOnWriteArraySet<WebSocketSession>> copy = new ConcurrentHashMap<>();
        userSessions.forEach((userId, sessions) -> {
            CopyOnWriteArraySet<WebSocketSession> activeSessions = getSessions(userId);
            if (!activeSessions.isEmpty()) {
                copy.put(userId, activeSessions);
            }
        });
        return Collections.unmodifiableMap(copy);
    }
}

View File

@ -0,0 +1,42 @@
package org.dromara.kafka2Websocket.interceptor;
import cn.dev33.satoken.exception.NotLoginException;
import cn.hutool.http.server.HttpServerRequest;
import com.sun.net.httpserver.HttpExchange;
import lombok.extern.slf4j.Slf4j;
import org.dromara.common.satoken.utils.LoginHelper;
import org.dromara.system.api.model.LoginUser;
import org.springframework.http.server.ServerHttpRequest;
import org.springframework.http.server.ServerHttpResponse;
import org.springframework.web.socket.WebSocketHandler;
import org.springframework.web.socket.server.HandshakeInterceptor;
import java.util.Map;
// 7. Handshake authentication interceptor.
@Slf4j
public class AuthInterceptor implements HandshakeInterceptor {
/**
 * Runs before the WebSocket handshake completes.
 * NOTE(review): the actual login check is commented out, so every handshake is
 * currently accepted and the NotLoginException catch below is effectively dead.
 * Re-enable LoginHelper.getLoginUser() (or equivalent) before production use.
 */
@Override
public boolean beforeHandshake(ServerHttpRequest request,
ServerHttpResponse response,
WebSocketHandler wsHandler,
Map<String, Object> attributes) {
try {
// LoginUser loginUser = LoginHelper.getLoginUser();
// attributes.put("loginUser", loginUser);
return true;
} catch (NotLoginException e) {
log.error("WebSocket 认证失败'{}',无法访问系统资源", e.getMessage());
return false;
}
}
// Post-handshake hook; intentionally a no-op.
@Override
public void afterHandshake(ServerHttpRequest request,
ServerHttpResponse response,
WebSocketHandler wsHandler,
Exception exception) {}
}

View File

@ -0,0 +1,153 @@
package org.dromara.kafka2Websocket.utils;
import cn.hutool.core.collection.CollUtil;
import lombok.AccessLevel;
import lombok.NoArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.dromara.common.redis.utils.RedisUtils;
import org.dromara.kafka2Websocket.dto.WebSocketMessageDto;
import org.dromara.kafka2Websocket.holder.WebSocketSessionHolder;
import org.springframework.web.socket.PongMessage;
import org.springframework.web.socket.TextMessage;
import org.springframework.web.socket.WebSocketMessage;
import org.springframework.web.socket.WebSocketSession;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;
/**
 * Utility methods for sending WebSocket messages, locally and across nodes
 * via the Redis "global:websocket" pub/sub channel.
 *
 * @author zendwang
 */
@Slf4j
@NoArgsConstructor(access = AccessLevel.PRIVATE)
public class SocketUtils {

    /**
     * Sends a text message to the single session registered under the given key.
     *
     * @param sessionKey numeric session key
     * @param message    text payload
     */
    public static void sendMessage(Long sessionKey, String message) {
        WebSocketSession session = WebSocketSessionHolder.getSessions(sessionKey);
        sendMessage(session, message);
    }

    /**
     * Subscribes to the cross-node WebSocket topic on Redis.
     *
     * @param consumer callback invoked for every published message
     */
    public static void subscribeMessage(Consumer<WebSocketMessageDto> consumer) {
        RedisUtils.subscribe("global:websocket", WebSocketMessageDto.class, consumer);
    }

    /**
     * Publishes a message: sessions hosted on this node are written directly;
     * the remaining session keys are forwarded over the Redis pub/sub channel
     * for other nodes to deliver.
     *
     * @param webSocketMessage target session keys plus the text payload
     */
    public static void publishMessage(WebSocketMessageDto webSocketMessage) {
        List<Long> unsentSessionKeys = new ArrayList<>();
        // Sessions on this node: send directly.
        for (Long sessionKey : webSocketMessage.getSessionKeys()) {
            if (WebSocketSessionHolder.existSession(sessionKey)) {
                SocketUtils.sendMessage(sessionKey, webSocketMessage.getMessage());
                continue;
            }
            unsentSessionKeys.add(sessionKey);
        }
        // Sessions on other nodes: publish so their holders can deliver.
        if (CollUtil.isNotEmpty(unsentSessionKeys)) {
            WebSocketMessageDto broadcastMessage = new WebSocketMessageDto();
            broadcastMessage.setMessage(webSocketMessage.getMessage());
            broadcastMessage.setSessionKeys(unsentSessionKeys);
            RedisUtils.publish("global:websocket", broadcastMessage, consumer -> {
                log.info("WebSocket发送主题订阅消息topic:{} session keys:{} message:{}",
                    "global:websocket", unsentSessionKeys, webSocketMessage.getMessage());
            });
        }
    }

    /**
     * Broadcasts a message to all nodes (no session-key filter).
     *
     * @param message text payload
     */
    public static void publishAll(String message) {
        WebSocketMessageDto broadcastMessage = new WebSocketMessageDto();
        broadcastMessage.setMessage(message);
        RedisUtils.publish("global:websocket", broadcastMessage, consumer -> {
            log.info("WebSocket发送主题订阅消息topic:{} message:{}", "global:websocket", message);
        });
    }

    /**
     * Replies to a client ping with a pong frame.
     *
     * @param session session to answer
     */
    public static void sendPongMessage(WebSocketSession session) {
        sendMessage(session, new PongMessage());
    }

    /** Sends a text message to one session. */
    public static void sendMessage(WebSocketSession session, String message) {
        sendMessage(session, new TextMessage(message));
    }

    /** Sends a text message to every session registered under the given id. */
    public static void sendMessage(String sid, String message) {
        WebSocketSessionHolder.getSessions(sid).forEach(session ->
            sendMessage(session, new TextMessage(message)));
    }

    /**
     * Low-level send with an open-state guard; failures are logged, not thrown.
     * <p>
     * The previously duplicated private {@code sendAllMessage} (identical body,
     * never called) has been removed as dead code.
     * NOTE(review): method-level {@code synchronized} uses the class lock and
     * therefore serializes ALL sends across every session — consider a
     * per-session lock if throughput matters.
     *
     * @param session target session (may be null/closed; then only a warning is logged)
     * @param message frame to send
     */
    private synchronized static void sendMessage(WebSocketSession session, WebSocketMessage<?> message) {
        if (session == null || !session.isOpen()) {
            log.warn("[send] session会话已经关闭");
        } else {
            try {
                session.sendMessage(message);
            } catch (IOException e) {
                log.error("[send] session({}) 发送消息({}) 异常", session, message, e);
            }
        }
    }
}

View File

@ -0,0 +1,34 @@
# Tomcat
server:
port: 9216
# Spring
spring:
application:
# 应用名称
name: wzhj-kafka2websocket
profiles:
# 环境配置
active: @profiles.active@
--- # nacos 配置
spring:
cloud:
nacos:
# nacos 服务地址
server-addr: @nacos.server@
username: @nacos.username@
password: @nacos.password@
discovery:
# 注册组
group: @nacos.discovery.group@
namespace: ${spring.profiles.active}
config:
# 配置组
group: @nacos.config.group@
namespace: ${spring.profiles.active}
config:
import:
- optional:nacos:application-common.yml
- optional:nacos:datasource.yml
- optional:nacos:${spring.application.name}.yml

View File

@ -0,0 +1,10 @@
Spring Boot Version: ${spring-boot.version}
Spring Application Name: ${spring.application.name}
_ _
(_) | |
_ __ _ _ ___ _ _ _ ______ ___ _ _ ___ | |_ ___ _ __ ___
| '__|| | | | / _ \ | | | || ||______|/ __|| | | |/ __|| __| / _ \| '_ ` _ \
| | | |_| || (_) || |_| || | \__ \| |_| |\__ \| |_ | __/| | | | | |
|_| \__,_| \___/ \__, ||_| |___/ \__, ||___/ \__| \___||_| |_| |_|
__/ | __/ |
|___/ |___/

View File

@ -0,0 +1,86 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="false">
<!-- 日志存放路径 -->
<property name="log.path" value="logs/${project.artifactId}" />
<!-- 日志输出格式 -->
<property name="console.log.pattern"
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
<!-- &lt;!&ndash; 控制台输出 &ndash;&gt;
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>-->
<include resource="logback-common.xml" />
<include resource="logback-logstash.xml" />
<!-- 开启 skywalking 日志收集 -->
<include resource="logback-skylog.xml" />
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Info 级别的日志,只是过滤 info 还是会输出 Error 日志,因为 Error 的级别高,
所以我们使用下面的策略,可以避免输出 Error 的日志-->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<!--过滤 Error-->
<level>ERROR</level>
<!--匹配到就禁止-->
<onMatch>DENY</onMatch>
<!--没有匹配到就允许-->
<onMismatch>ACCEPT</onMismatch>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}${LOG_FILE}</File>
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_PATH}info/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</fileNamePattern>
<maxFileSize>50MB</maxFileSize>
<maxHistory>20</maxHistory> <!-- keep 20 rolling periods; the previous "保留180天" comment contradicted this value -->
</rollingPolicy>
</appender>
<!--error log-->
<appender name="ERRORFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Error 级别的日志,那么需要过滤一下,默认是 info 级别的ThresholdFilter-->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>Error</level>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}error.${LOG_FILE}</File>
<!--滚动策略,按照时间滚动 TimeBasedRollingPolicy-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!--文件路径,定义了日志的切分方式——把每一天的日志归档到一个文件中,以防止日志填满整个磁盘空间-->
<FileNamePattern>${LOG_PATH}error/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</FileNamePattern>
<!-- keep only the most recent 180 days of error logs (matches maxHistory below; old comment said 90) -->
<maxFileSize>50MB</maxFileSize>
<maxHistory>180</maxHistory>
<!--用来指定日志文件的上限大小,那么到了这个值,就会删除旧的日志-->
<!--<totalSizeCap>1GB</totalSizeCap>-->
</rollingPolicy>
<!--日志输出编码格式化-->
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="FILE"/>
<appender-ref ref="ERRORFILE"/>
</root>
</configuration>

View File

@ -103,6 +103,17 @@
<groupId>org.dromara</groupId>
<artifactId>stwzhj-api-resource</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-api-location</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-websocket</artifactId>
</dependency>
<!--elasticsearch-->
<dependency>
<groupId>org.elasticsearch</groupId>

View File

@ -5,6 +5,7 @@ import org.apache.dubbo.config.spring.context.annotation.EnableDubbo;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.context.metrics.buffering.BufferingApplicationStartup;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.scheduling.annotation.EnableScheduling;
@EnableDubbo

View File

@ -9,6 +9,7 @@ import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import org.apache.dubbo.config.annotation.DubboReference;
import org.dromara.common.core.domain.R;
import org.dromara.common.core.utils.RedisConstants;
import org.dromara.common.redis.utils.RedisUtils;
import org.dromara.system.api.RemoteDeviceService;
import org.dromara.system.api.domain.bo.RemoteDeviceBo;
@ -177,23 +178,20 @@ public class LocationController {
String lat = params.get("lat").toString();
String lng = params.get("lng").toString();
String dist = params.get("distance").toString();
/* List<GeoRadiusResponse> geoRadiusResponses = redisUtil.nearByXYReadonly(RedisConstants.ONLINE_USERS_GEO,
List<String> geoRadiusResponses = RedisUtils.nearByXYReadonly(
Double.parseDouble(lng), Double.parseDouble(lat), Double.parseDouble(dist));
List<Device> list = new ArrayList<>();
for (GeoRadiusResponse geoRadiusRespons : geoRadiusResponses) {
String memberByString = geoRadiusRespons.getMemberByString();
logger.info("member:"+memberByString);
String[] strs = memberByString.split("#");
logger.info("key值:"+keys+":"+strs[0]+":"+strs[1]+":"+strs[2]);
Object object = redisUtil.get(keys+":"+strs[0]+":"+strs[1]+":"+strs[2]);
List<JSONObject> list = new ArrayList<>();
for (String geoRadiusRespons : geoRadiusResponses) {
logger.info("member:"+geoRadiusRespons);
String[] strs = geoRadiusRespons.split("#");
logger.info("key值:"+keys+":"+strs[0]+":"+strs[1]);
JSONObject object = RedisUtils.getBucket(keys+":"+strs[0]+":"+strs[1]);
if (null != object){
Device device = FastJSONUtil.parsePojo(object.toString(), Device.class);
//device = rebuildDevice(device);
list.add(device);
list.add(object);
}
}*/
}
return R.ok();
return R.ok(list);
}
/*+

View File

@ -0,0 +1,23 @@
package org.dromara.location.dubbo;
import lombok.RequiredArgsConstructor;
import org.apache.dubbo.config.annotation.DubboService;
import org.dromara.location.api.RemoteElasticSearchService;
import org.dromara.location.service.ISearchService;
import org.springframework.stereotype.Service;
import java.util.List;
@RequiredArgsConstructor
@Service
@DubboService
public class RemoteElasticSearchServiceImpl implements RemoteElasticSearchService {
// Delegate that performs the actual Elasticsearch queries.
private final ISearchService searchService;
/**
 * Returns the latest data-status entries from Elasticsearch (per device type,
 * per SearchServiceImpl).
 * NOTE(review): "linstenDataStatus" is a typo for "listenDataStatus"; renaming
 * requires changing the RemoteElasticSearchService interface and all callers.
 */
@Override
public List<String> linstenDataStatus() {
return searchService.linstenDataStatus();
}
}

View File

@ -1 +0,0 @@
package org.dromara.location;

View File

@ -1,5 +1,7 @@
package org.dromara.location.service;
import org.dromara.system.api.domain.vo.RemoteDictDataVo;
import java.util.List;
import java.util.Map;
@ -7,5 +9,10 @@ import java.util.Map;
public interface ISearchService {
public List<Map> searchCar(String deviceCode, String startTime, String endTime,String deviceType) ;
/*
* ES
* */
public List<String> linstenDataStatus();
}

View File

@ -2,9 +2,14 @@ package org.dromara.location.service.impl;
import cn.hutool.core.date.DateField;
import cn.hutool.core.date.DateTime;
import cn.hutool.core.date.DateUnit;
import cn.hutool.core.date.DateUtil;
import com.alibaba.fastjson.JSON;
import lombok.RequiredArgsConstructor;
import org.apache.dubbo.config.annotation.DubboReference;
import org.dromara.location.service.ISearchService;
import org.dromara.system.api.RemoteDictService;
import org.dromara.system.api.domain.vo.RemoteDictDataVo;
import org.elasticsearch.action.search.*;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.RequestOptions;
@ -13,20 +18,22 @@ import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Pageable;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.*;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
@ -38,6 +45,11 @@ public class SearchServiceImpl implements ISearchService {
@Autowired
private RestHighLevelClient restHighLevelClient;
@DubboReference
RemoteDictService dictService;
@Override
public List<Map> searchCar(String deviceCode, String startTime, String endTime,String deviceType) throws RuntimeException{
@ -117,8 +129,52 @@ public class SearchServiceImpl implements ISearchService {
return sourceList;
}
/**
 * Checks, for every device type in dict "zd_device_type", whether today's
 * Elasticsearch index ("gpsinfo" + yyyyMMdd) holds fresh GPS data.
 * A type is reported when it has no document at all, or when its newest
 * "gpsTime" is 30 or more minutes old.
 *
 * @return dict labels of device types whose data is missing or stale
 */
@Override
public List<String> linstenDataStatus() {
    List<RemoteDictDataVo> list = dictService.selectDictDataByType("zd_device_type");
    List<String> staleLabels = new ArrayList<>();
    for (RemoteDictDataVo dataVo : list) {
        try {
            // Only the newest record of this device type is needed (size = 1).
            BoolQueryBuilder boolBuilder = QueryBuilders.boolQuery()
                .must(QueryBuilders.termQuery("deviceType", dataVo.getDictValue()));
            SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder()
                .query(boolBuilder)
                .sort("gpsTime", SortOrder.DESC)
                .size(1);
            SearchRequest searchRequest = new SearchRequest();
            searchRequest.source(searchSourceBuilder);
            // A missing daily index must not fail the whole query.
            searchRequest.indicesOptions(IndicesOptions.lenientExpandOpen());
            // BUGFIX: pattern must be "yyyyMMdd" — uppercase "YYYY" is the
            // week-based year and yields a wrong index name around new year.
            String index = "gpsinfo" + DateUtil.format(new Date(), "yyyyMMdd");
            searchRequest.indices(index);
            SearchResponse searchResponse = restHighLevelClient.search(searchRequest, RequestOptions.DEFAULT);
            if (RestStatus.OK.equals(searchResponse.status()) && searchResponse.getHits().getTotalHits().value > 0) {
                for (SearchHit hit : searchResponse.getHits()) {
                    Map<String, Object> sourceAsMap = hit.getSourceAsMap();
                    Object gpsTime = sourceAsMap.get("gpsTime");
                    // A document without gpsTime is treated the same as stale data
                    // (previously this would have thrown an NPE into the catch below).
                    if (gpsTime == null
                        || DateUtil.between(new Date(), DateUtil.parse(gpsTime.toString()), DateUnit.MINUTE) >= 30) {
                        staleLabels.add(dataVo.getDictLabel());
                    }
                }
            } else {
                // No data at all for this type in today's index.
                staleLabels.add(dataVo.getDictLabel());
            }
        } catch (Exception e) {
            // Best effort: a failure for one device type must not stop the others.
            // TODO(review): switch to a proper logger; printStackTrace loses context.
            e.printStackTrace();
        }
    }
    return staleLabels;
}
private List<String> findEsIndexByTime(String startTime, String endTime) {
startTime = startTime.substring(0, 10).replaceAll("-","");//yyyyMMdd

View File

@ -1,18 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="false">
<!-- 日志存放路径 -->
<property name="log.path" value="logs/${project.artifactId}" />
<!-- 日志输出格式 -->
<property name="log.path" value="logs/${project.artifactId}" />
<!-- 日志输出格式 -->
<property name="console.log.pattern"
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
<!-- 控制台输出 -->
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>
<!-- &lt;!&ndash; 控制台输出 &ndash;&gt;
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>-->
<include resource="logback-common.xml" />
@ -21,8 +21,66 @@
<!-- 开启 skywalking 日志收集 -->
<include resource="logback-skylog.xml" />
<!--系统操作日志-->
<root level="info">
<appender-ref ref="console" />
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Info 级别的日志,只是过滤 info 还是会输出 Error 日志,因为 Error 的级别高,
所以我们使用下面的策略,可以避免输出 Error 的日志-->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<!--过滤 Error-->
<level>ERROR</level>
<!--匹配到就禁止-->
<onMatch>DENY</onMatch>
<!--没有匹配到就允许-->
<onMismatch>ACCEPT</onMismatch>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}${LOG_FILE}</File>
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_PATH}info/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</fileNamePattern>
<maxFileSize>50MB</maxFileSize>
<maxHistory>20</maxHistory> <!-- 保留180天 -->
</rollingPolicy>
</appender>
<!--error log-->
<appender name="ERRORFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Error 级别的日志,那么需要过滤一下,默认是 info 级别的ThresholdFilter-->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>Error</level>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}error.${LOG_FILE}</File>
<!--滚动策略,按照时间滚动 TimeBasedRollingPolicy-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy ">
<!--文件路径,定义了日志的切分方式——把每一天的日志归档到一个文件中,以防止日志填满整个磁盘空间-->
<FileNamePattern>${LOG_PATH}error/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</FileNamePattern>
<!--只保留最近180天的日志-->
<maxFileSize>50MB</maxFileSize>
<maxHistory>180</maxHistory>
<!--用来指定日志文件的上限大小,那么到了这个值,就会删除旧的日志-->
<!--<totalSizeCap>1GB</totalSizeCap>-->
</rollingPolicy>
<!--日志输出编码格式化-->
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="FILE"/>
<appender-ref ref="ERRORFILE"/>
</root>
</configuration>

View File

@ -6,7 +6,7 @@ server:
spring:
application:
# 应用名称
name: ruoyi-resource
name: wzhj-resource
profiles:
# 环境配置
active: @profiles.active@

View File

@ -104,6 +104,11 @@
<artifactId>stwzhj-api-resource</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-api-location</artifactId>
</dependency>
<dependency>
<groupId>com.github.jeffreyning</groupId>
<artifactId>mybatisplus-plus</artifactId>

View File

@ -4,14 +4,20 @@ package org.dromara.system.controller.system;
import cn.hutool.core.date.DateUtil;
import jdk.dynalink.linker.LinkerServices;
import lombok.RequiredArgsConstructor;
import org.apache.dubbo.config.annotation.DubboReference;
import org.dromara.common.core.domain.R;
import org.dromara.common.redis.utils.RedisUtils;
import org.dromara.common.web.core.BaseController;
import org.dromara.location.api.RemoteElasticSearchService;
import org.dromara.system.domain.DeviceRedis;
import org.dromara.system.domain.SysNotice;
import org.dromara.system.domain.bo.SysDeptBo;
import org.dromara.system.domain.bo.SysNoticeBo;
import org.dromara.system.domain.bo.TDeviceBo;
import org.dromara.system.domain.vo.*;
import org.dromara.system.service.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.web.bind.annotation.*;
import java.util.*;
@ -28,6 +34,12 @@ public class IndexStaticsController extends BaseController {
private final IDeviceRedisService redisService;
private final ISysNoticeService noticeService;
@DubboReference
RemoteElasticSearchService elasticSearchService;
/*
@ -75,6 +87,27 @@ public class IndexStaticsController extends BaseController {
return R.ok(maps);
}
/**
 * Scheduled watchdog (every 30 minutes): asks the location service which
 * device types have stale Elasticsearch data and, when any are reported,
 * records an alert notice — throttled to at most three notices per day.
 */
@Scheduled(cron = "0 */30 * * * ?")
public void listen() {
    List<String> staleTypes = elasticSearchService.linstenDataStatus();
    if (staleTypes.isEmpty()) {
        return;
    }
    // Throttle: stop once today already holds more than two alert notices.
    List<SysNoticeVo> todayNotices = noticeService.selectTodayNoticeList();
    if (todayNotices.size() > 2) {
        return;
    }
    // -- TODO: send SMS notification as well
    SysNoticeBo alert = new SysNoticeBo();
    alert.setNoticeTitle("手机号码");
    alert.setNoticeType("3");
    alert.setNoticeContent(staleTypes.toString() + "数据不正常,请检查服务是否正常");
    alert.setCreateTime(DateUtil.date());
    noticeService.insertNotice(alert);
}
/*
* 线
* */

View File

@ -40,6 +40,8 @@ public class SysDept extends TenantEntity {
*/
private String deptName;
private String shortName;
/**
*
@ -79,4 +81,6 @@ public class SysDept extends TenantEntity {
private String fullName;
private String isVisible;
}

View File

@ -24,6 +24,7 @@ public class SysDeptBo extends BaseEntity {
/**
* id
*/
@Size(min = 0, max = 12, message = "部门机构代码长度不能超过{max}个字符")
private String deptId;
/**
@ -38,6 +39,9 @@ public class SysDeptBo extends BaseEntity {
@Size(min = 0, max = 30, message = "部门名称长度不能超过{max}个字符")
private String deptName;
@NotBlank(message = "部门简称不能为空")
private String shortName;
/**
*
*/
@ -75,4 +79,6 @@ public class SysDeptBo extends BaseEntity {
private String fullName;
private String isVisible;
}

View File

@ -77,6 +77,10 @@ public class TDeviceBo extends BaseEntity {
private String cardNum;
private String tdbm;
private String gbbm;
/**
* 01
*/

View File

@ -52,6 +52,8 @@ public class SysDeptVo implements Serializable {
@ExcelProperty(value = "部门名称")
private String deptName;
private String shortName;
/**
*
*/
@ -93,6 +95,8 @@ public class SysDeptVo implements Serializable {
@ExcelDictFormat(dictType = "sys_normal_disable")
private String status;
private String isVisible;
/**
*
*/

View File

@ -16,13 +16,13 @@ public class TDeviceExportVo implements Serializable {
@Serial
private static final long serialVersionUID = 1L;
@ExcelProperty(value = "设备编")
@ExcelProperty(value = "设备编")
private String deviceCode;
/**
*
*/
@ExcelProperty(value = "设备类型")
@ExcelProperty(value = "设备类型", converter = ExcelDictConvert.class)
@ExcelDictFormat(dictType = "zd_device_type")
private String deviceType;
@ -41,35 +41,41 @@ public class TDeviceExportVo implements Serializable {
/**
*
*/
@ExcelProperty(value = "警号", converter = ExcelDictConvert.class)
@ExcelProperty(value = "警号")
private String policeNo;
/**
*
*/
@ExcelProperty(value = "警员姓名", converter = ExcelDictConvert.class)
@ExcelProperty(value = "警员姓名")
private String policeName;
/**
*
*/
@ExcelProperty(value = "电话号码", converter = ExcelDictConvert.class)
@ExcelProperty(value = "电话号码")
private String phoneNum;
/**
*
*/
@ExcelProperty(value = "车牌号", converter = ExcelDictConvert.class)
@ExcelProperty(value = "车牌号")
private String carNum;
@ExcelProperty(value = "证件号码", converter = ExcelDictConvert.class)
@ExcelProperty(value = "证件号码")
private String cardNum;
@ExcelProperty(value = "通道编码")
private String tdbm;
@ExcelProperty(value = "国标编码")
private String gbbm;
/**
* 01
*/
@ExcelProperty(value = "有效性")
@ExcelDictFormat(readConverterExp = "1=有效,0=无效")
@ExcelProperty(value = "有效性", converter = ExcelDictConvert.class)
@ExcelDictFormat(dictType = "zd_device_status")
private Integer valid;
/**

View File

@ -16,7 +16,7 @@ public class TDeviceImportVo implements Serializable {
@Serial
private static final long serialVersionUID = 1L;
@ExcelProperty(value = "设备编")
@ExcelProperty(value = "设备编")
private String deviceCode;
/**
@ -65,6 +65,12 @@ public class TDeviceImportVo implements Serializable {
@ExcelProperty(value = "证件号码")
private String cardNum;
@ExcelProperty(value = "通道编码")
private String tdbm;
@ExcelProperty(value = "国标编码")
private String gbbm;
/**
* 01
*/

View File

@ -91,6 +91,10 @@ public class TDeviceVo implements Serializable {
private String cardNum;
private String tdbm;
private String gbbm;
/**
* 01
*/

View File

@ -16,8 +16,10 @@ import org.dromara.common.excel.core.ExcelListener;
import org.dromara.common.excel.core.ExcelResult;
import org.dromara.common.satoken.utils.LoginHelper;
import org.dromara.system.domain.bo.TDeviceBo;
import org.dromara.system.domain.vo.SysDeptVo;
import org.dromara.system.domain.vo.TDeviceImportVo;
import org.dromara.system.domain.vo.TDeviceVo;
import org.dromara.system.service.ISysDeptService;
import org.dromara.system.service.ITDeviceService;
import java.util.List;
@ -27,6 +29,8 @@ public class TDeviceImportListener extends AnalysisEventListener<TDeviceImportVo
private final ITDeviceService deviceService;
private final ISysDeptService deptService;
private final Boolean isUpdateSupport;
@ -37,42 +41,60 @@ public class TDeviceImportListener extends AnalysisEventListener<TDeviceImportVo
// Constructor. Collaborators are resolved via SpringUtils.getBean, which
// suggests instances are created outside the Spring container (e.g. per
// Excel import) — TODO confirm against the caller.
public TDeviceImportListener(Boolean isUpdateSupport) {
    this.deviceService = SpringUtils.getBean(ITDeviceService.class);
    this.deptService = SpringUtils.getBean(ISysDeptService.class);
    // When true, an already-existing device is updated instead of rejected.
    this.isUpdateSupport = isUpdateSupport;
}
@Override
public void invoke(TDeviceImportVo deviceImportVo, AnalysisContext context) {
TDeviceVo deviceVo = this.deviceService.queryByDeviceCode(deviceImportVo.getDeviceCode());
try {
// 验证是否存在这个设备
if (ObjectUtil.isNull(deviceVo)) {
TDeviceBo deviceBo = BeanUtil.toBean(deviceImportVo, TDeviceBo.class);
ValidatorUtils.validate(deviceBo);
deviceService.insertByBo(deviceBo);
successNum++;
successMsg.append("<br/>").append(successNum).append("、设备 ").append(deviceBo.getDeviceCode()).append(" 导入成功");
} else if (isUpdateSupport) {
Long id = deviceVo.getId();
TDeviceBo deviceBo = BeanUtil.toBean(deviceVo, TDeviceBo.class);
deviceBo.setId(id);
ValidatorUtils.validate(deviceBo);
deviceService.updateByBo(deviceBo);
successNum++;
successMsg.append("<br/>").append(successNum).append("、设备 ").append(deviceImportVo.getDeviceCode()).append(" 更新成功");
} else {
if (null != deviceImportVo.getDeviceCode() && deviceImportVo.getDeviceCode().contains("设备编码为国标编码")){
failureNum = 1;
failureMsg.append("请删除Excel中提示必须删除的行后重新导入");
}else {
String deviceCode = deviceImportVo.getDeviceCode().replaceAll("[\\p{Zs}\\s]", "");
TDeviceVo deviceVo = this.deviceService.queryByDeviceCode(deviceCode);
try {
// 验证是否存在这个设备
if (ObjectUtil.isNull(deviceVo)) {
deviceImportVo.setDeviceCode(deviceCode);
SysDeptVo deptVo = deptService.selectDeptById(deviceImportVo.getZzjgdm());
if(null != deptVo){
deviceImportVo.setZzjgmc(deptVo.getShortName());
}
TDeviceBo deviceBo = BeanUtil.toBean(deviceImportVo, TDeviceBo.class);
ValidatorUtils.validate(deviceBo);
deviceService.insertByBo(deviceBo);
successNum++;
successMsg.append("<br/>").append(successNum).append("、设备 ").append(deviceBo.getDeviceCode()).append(" 导入成功");
} else if (isUpdateSupport) {
Long id = deviceVo.getId();
SysDeptVo deptVo = deptService.selectDeptById(deviceVo.getZzjgdm());
if(null != deptVo){
deviceVo.setZzjgmc(deptVo.getShortName());
}
TDeviceBo deviceBo = BeanUtil.toBean(deviceVo, TDeviceBo.class);
deviceBo.setId(id);
ValidatorUtils.validate(deviceBo);
deviceService.updateByBo(deviceBo);
successNum++;
successMsg.append("<br/>").append(successNum).append("、设备 ").append(deviceImportVo.getDeviceCode()).append(" 更新成功");
} else {
failureNum++;
failureMsg.append("<br/>").append(failureNum).append("、设备 ").append(deviceImportVo.getDeviceCode()).append(" 已存在");
}
} catch (Exception e) {
failureNum++;
failureMsg.append("<br/>").append(failureNum).append("、设备 ").append(deviceImportVo.getDeviceCode()).append(" 已存在");
String msg = "<br/>" + failureNum + "、设备 " + HtmlUtil.cleanHtmlTag(deviceImportVo.getDeviceCode()) + " 导入失败:";
String message = e.getMessage();
if (e instanceof ConstraintViolationException cvException) {
message = StreamUtils.join(cvException.getConstraintViolations(), ConstraintViolation::getMessage, ", ");
}
failureMsg.append(msg).append(message);
log.error(msg, e);
}
} catch (Exception e) {
failureNum++;
String msg = "<br/>" + failureNum + "、设备 " + HtmlUtil.cleanHtmlTag(deviceImportVo.getDeviceCode()) + " 导入失败:";
String message = e.getMessage();
if (e instanceof ConstraintViolationException cvException) {
message = StreamUtils.join(cvException.getConstraintViolations(), ConstraintViolation::getMessage, ", ");
}
failureMsg.append(msg).append(message);
log.error(msg, e);
}
}
@Override

View File

@ -33,6 +33,8 @@ public interface ISysNoticeService {
*/
List<SysNoticeVo> selectNoticeList(SysNoticeBo notice);
List<SysNoticeVo> selectTodayNoticeList();
/**
*
*

View File

@ -82,6 +82,7 @@ public class SysDeptServiceImpl implements ISysDeptService {
lqw.eq(StringUtils.isNotBlank(bo.getParentId()), SysDept::getParentId, bo.getParentId());
lqw.like(StringUtils.isNotBlank(bo.getDeptName()), SysDept::getDeptName, bo.getDeptName());
lqw.eq(StringUtils.isNotBlank(bo.getStatus()), SysDept::getStatus, bo.getStatus());
lqw.eq(StringUtils.isNotBlank(bo.getIsVisible()),SysDept::getIsVisible,bo.getIsVisible());
lqw.eq(StringUtils.isNotBlank(bo.getFullName()), SysDept::getFullName, bo.getFullName());
lqw.orderByAsc(SysDept::getAncestors);
lqw.orderByAsc(SysDept::getParentId);
@ -104,7 +105,7 @@ public class SysDeptServiceImpl implements ISysDeptService {
return TreeBuildUtils.build(depts, (dept, tree) ->
tree.setId(dept.getDeptId())
.setParentId(dept.getParentId())
.setName(dept.getDeptName())
.setName(dept.getShortName())
.setWeight(dept.getOrderNum()));
}
@ -289,6 +290,10 @@ public class SysDeptServiceImpl implements ISysDeptService {
// 如果该部门是启用状态,则启用该部门的所有上级部门
updateParentDeptStatusNormal(dept);
}
if ("0".equals(dept.getIsVisible())){ //如果隐藏该部门则其子部门全部隐藏
updateDeptChildrenVisiable(dept.getDeptId(),"0");
}
return result;
}
@ -305,6 +310,19 @@ public class SysDeptServiceImpl implements ISysDeptService {
.in(SysDept::getDeptId, Arrays.asList(deptIds)));
}
/**
 * Propagates a visibility flag to every descendant of a department.
 * Descendants are located by matching the department id inside the
 * "ancestors" column via find_in_set.
 *
 * @param deptId  id of the department whose subtree is updated
 * @param visible visibility flag to apply ("0" = hidden)
 */
private void updateDeptChildrenVisiable(String deptId, String visible) {
    LambdaQueryWrapper<SysDept> wrapper = new LambdaQueryWrapper<SysDept>()
        .apply(DataBaseHelper.findInSet(deptId, "ancestors"));
    // NOTE(review): one UPDATE per row; acceptable for small department trees.
    for (SysDept descendant : baseMapper.selectList(wrapper)) {
        descendant.setIsVisible(visible);
        baseMapper.updateById(descendant);
    }
}
/**
*
*

View File

@ -1,5 +1,6 @@
package org.dromara.system.service.impl;
import cn.hutool.core.date.DateUtil;
import cn.hutool.core.util.ObjectUtil;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.toolkit.Wrappers;
@ -20,6 +21,7 @@ import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Service;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
/**
@ -64,6 +66,14 @@ public class SysNoticeServiceImpl implements ISysNoticeService {
return baseMapper.selectVoList(lqw);
}
/**
 * Lists today's notices of type "3", matched by a date prefix on
 * create_time (i.e. records created on the current calendar day).
 *
 * @return today's type-"3" notices
 */
@Override
public List<SysNoticeVo> selectTodayNoticeList() {
    String today = DateUtil.formatDate(new Date());
    LambdaQueryWrapper<SysNotice> wrapper = new LambdaQueryWrapper<>();
    wrapper.likeRight(SysNotice::getCreateTime, today)
           .eq(SysNotice::getNoticeType, "3");
    return baseMapper.selectVoList(wrapper);
}
private LambdaQueryWrapper<SysNotice> buildQueryWrapper(SysNoticeBo bo) {
LambdaQueryWrapper<SysNotice> lqw = Wrappers.lambdaQuery();
lqw.like(StringUtils.isNotBlank(bo.getNoticeTitle()), SysNotice::getNoticeTitle, bo.getNoticeTitle());

View File

@ -166,6 +166,7 @@ public class TDeviceServiceImpl implements ITDeviceService {
*/
@Override
public Boolean insertByBo(TDeviceBo bo) {
bo.setDeviceCode(bo.getDeviceCode().replaceAll("[\\p{Zs}\\s]", ""));
TDevice add = MapstructUtils.convert(bo, TDevice.class);
validEntityBeforeSave(add);
add.setCreateTime(DateUtil.now());

View File

@ -1,18 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="false">
<!-- 日志存放路径 -->
<property name="log.path" value="logs/${project.artifactId}" />
<!-- 日志输出格式 -->
<property name="log.path" value="logs/${project.artifactId}" />
<!-- 日志输出格式 -->
<property name="console.log.pattern"
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
<!-- 控制台输出 -->
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>
<!-- &lt;!&ndash; 控制台输出 &ndash;&gt;
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>-->
<include resource="logback-common.xml" />
@ -21,8 +21,66 @@
<!-- 开启 skywalking 日志收集 -->
<include resource="logback-skylog.xml" />
<!--系统操作日志-->
<root level="info">
<appender-ref ref="console" />
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Info 级别的日志,只是过滤 info 还是会输出 Error 日志,因为 Error 的级别高,
所以我们使用下面的策略,可以避免输出 Error 的日志-->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<!--过滤 Error-->
<level>ERROR</level>
<!--匹配到就禁止-->
<onMatch>DENY</onMatch>
<!--没有匹配到就允许-->
<onMismatch>ACCEPT</onMismatch>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}${LOG_FILE}</File>
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_PATH}info/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</fileNamePattern>
<maxFileSize>50MB</maxFileSize>
<maxHistory>20</maxHistory> <!-- 保留20天 -->
</rollingPolicy>
</appender>
<!--error log-->
<appender name="ERRORFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Error 级别的日志,那么需要过滤一下,默认是 info 级别的ThresholdFilter-->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>Error</level>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}error.${LOG_FILE}</File>
<!--滚动策略,按照时间滚动 TimeBasedRollingPolicy-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy ">
<!--文件路径,定义了日志的切分方式——把每一天的日志归档到一个文件中,以防止日志填满整个磁盘空间-->
<FileNamePattern>${LOG_PATH}error/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</FileNamePattern>
<!--只保留最近180天的日志-->
<maxFileSize>50MB</maxFileSize>
<maxHistory>180</maxHistory>
<!--用来指定日志文件的上限大小,那么到了这个值,就会删除旧的日志-->
<!--<totalSizeCap>1GB</totalSizeCap>-->
</rollingPolicy>
<!--日志输出编码格式化-->
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="FILE"/>
<appender-ref ref="ERRORFILE"/>
</root>
</configuration>

View File

@ -70,7 +70,7 @@
</if>
group by SUBSTRING(zzjgdm,1,8)) a
on SUBSTRING(d.dept_id,1,8) = SUBSTRING(a.zzjgdm,1,8)
where `status` = '0' and del_flag =0 and parent_id = '341800000000' and dept_id like '341800%'
where `status` = '0' and del_flag =0 and is_visible = '1' and parent_id = '341300000000' and dept_id like '341300%'
union
@ -92,7 +92,7 @@
</if>
group by SUBSTRING(zzjgdm,1,6)) a
on SUBSTRING(d.dept_id,1,6) = SUBSTRING(a.zzjgdm,1,6)
where `status` = '0' and del_flag =0 and parent_id = '341800000000' and dept_id not like '341800%'
where `status` = '0' and del_flag =0 and is_visible = '1' and parent_id = '341300000000' and dept_id not like '341300%'
union
<!--派出所等三级四级机构-->
@ -113,7 +113,7 @@
</if>
group by SUBSTRING(zzjgdm,1,8)) a
on SUBSTRING(d.dept_id,1,8) = SUBSTRING(a.zzjgdm,1,8)
where `status` = '0' and del_flag =0 and parent_id != '341800000000' and dept_id not like '341800%' and dept_id like '%0000'
where `status` = '0' and del_flag =0 and is_visible = '1' and parent_id != '341300000000' and parent_id != '0' and dept_id like '%0000'
<!--大队等三级四级机构-->
UNION
select dept_id as deptId,short_name as deptName,parent_id as parentId,IFNULL(onlineCount,0) as onlineCount,IFNULL(allCount,0) as allCount from sys_dept d
@ -133,7 +133,7 @@
</if>
group by SUBSTRING(zzjgdm,1,10)) a
on SUBSTRING(d.dept_id,1,10) = SUBSTRING(a.zzjgdm,1,10)
where `status` = '0' and del_flag =0 and parent_id != '341800000000' and parent_id != '0' and dept_id like '341800%' and dept_id like '%00'
where `status` = '0' and del_flag =0 and is_visible = '1' and parent_id != '341300000000' and parent_id != '0' and dept_id not like '%0000' and dept_id like '%00'
union
<!--四级机构-->
@ -154,7 +154,7 @@
</if>
group by zzjgdm) a
on d.dept_id = a.zzjgdm
where `status` = '0' and del_flag =0 and LENGTH(ancestors) = 40
where `status` = '0' and is_visible = '1' and del_flag =0 and LENGTH(ancestors) = 53
) a
order by a.deptId asc
</select>
@ -175,7 +175,7 @@
LEFT JOIN
(select substring(zzjgdm,1,8) as zzjgdm ,count(*) as allCount from t_device where device_type = #{deviceType} and valid =1 group by SUBSTRING(zzjgdm,1,8)) a
on SUBSTRING(d.dept_id,1,8) = SUBSTRING(a.zzjgdm,1,8)
where `status` = '0' and del_flag =0 and parent_id = '340100000000' and dept_id like '340100%'
where `status` = '0' and del_flag =0 and is_visible = '1' and parent_id = '341300000000' and dept_id like '341300%'
union
@ -187,7 +187,7 @@
LEFT JOIN
(select substring(zzjgdm,1,6) as zzjgdm ,count(*) as allCount from t_device where device_type = #{deviceType} and valid =1 group by SUBSTRING(zzjgdm,1,6)) a
on SUBSTRING(d.dept_id,1,6) = SUBSTRING(a.zzjgdm,1,6)
where `status` = '0' and del_flag =0 and parent_id = '340100000000' and dept_id not like '340100%'
where `status` = '0' and del_flag =0 and is_visible = '1' and parent_id = '341300000000' and dept_id not like '341300%'
union
<!--派出所等三级四级机构-->
@ -198,7 +198,7 @@
LEFT JOIN
(select substring(zzjgdm,1,8) as zzjgdm ,count(*) as allCount from t_device where device_type = #{deviceType} and valid = 1 group by SUBSTRING(zzjgdm,1,8)) a
on SUBSTRING(d.dept_id,1,8) = SUBSTRING(a.zzjgdm,1,8)
where `status` = '0' and del_flag =0 and parent_id != '340100000000' and dept_id not like '340100%' and dept_id like '%0000'
where `status` = '0' and del_flag =0 and is_visible = '1' and parent_id != '341300000000' and parent_id != '0' and dept_id like '%0000'
<!--大队等三级四级机构-->
UNION
select dept_id as deptId,short_name as deptName,parent_id as parentId,IFNULL(onlineCount,0) as onlineCount,IFNULL(allCount,0) as allCount from sys_dept d
@ -208,7 +208,7 @@
LEFT JOIN
(select substring(zzjgdm,1,10) as zzjgdm ,count(*) as allCount from t_device where device_type = #{deviceType} and valid = 1 group by SUBSTRING(zzjgdm,1,10)) a
on SUBSTRING(d.dept_id,1,10) = SUBSTRING(a.zzjgdm,1,10)
where `status` = '0' and del_flag =0 and parent_id != '340100000000' and parent_id != '0' and dept_id like '340100%' and dept_id like '%00'
where `status` = '0' and del_flag =0 and is_visible = '1' and parent_id != '341300000000' and parent_id != '0' and dept_id not like '%0000' and dept_id like '%00'
union
<!--四级机构-->
@ -219,7 +219,7 @@
LEFT JOIN
(select zzjgdm as zzjgdm ,count(*) as allCount from t_device where device_type = #{deviceType} and valid = 1 group by zzjgdm) a
on d.dept_id = a.zzjgdm
where `status` = '0' and del_flag =0 and LENGTH(ancestors) = 40
where `status` = '0' and del_flag =0 and is_visible = '1' and LENGTH(ancestors) = 53
) count_temp
<where>
<if test="deptId != null and deptId != ''">

View File

@ -1,18 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="false">
<!-- 日志存放路径 -->
<property name="log.path" value="logs/${project.artifactId}" />
<!-- 日志输出格式 -->
<property name="log.path" value="logs/${project.artifactId}" />
<!-- 日志输出格式 -->
<property name="console.log.pattern"
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
<!-- 控制台输出 -->
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>
<!-- &lt;!&ndash; 控制台输出 &ndash;&gt;
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>-->
<include resource="logback-common.xml" />
@ -21,8 +21,66 @@
<!-- 开启 skywalking 日志收集 -->
<include resource="logback-skylog.xml" />
<!--系统操作日志-->
<root level="info">
<appender-ref ref="console" />
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Info 级别的日志,只是过滤 info 还是会输出 Error 日志,因为 Error 的级别高,
所以我们使用下面的策略,可以避免输出 Error 的日志-->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<!--过滤 Error-->
<level>ERROR</level>
<!--匹配到就禁止-->
<onMatch>DENY</onMatch>
<!--没有匹配到就允许-->
<onMismatch>ACCEPT</onMismatch>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}${LOG_FILE}</File>
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_PATH}info/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</fileNamePattern>
<maxFileSize>50MB</maxFileSize>
<maxHistory>20</maxHistory> <!-- 保留20天 -->
</rollingPolicy>
</appender>
<!--error log-->
<appender name="ERRORFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Error 级别的日志,那么需要过滤一下,默认是 info 级别的ThresholdFilter-->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>Error</level>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}error.${LOG_FILE}</File>
<!--滚动策略,按照时间滚动 TimeBasedRollingPolicy-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy ">
<!--文件路径,定义了日志的切分方式——把每一天的日志归档到一个文件中,以防止日志填满整个磁盘空间-->
<FileNamePattern>${LOG_PATH}error/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</FileNamePattern>
<!--只保留最近180天的日志-->
<maxFileSize>50MB</maxFileSize>
<maxHistory>180</maxHistory>
<!--用来指定日志文件的上限大小,那么到了这个值,就会删除旧的日志-->
<!--<totalSizeCap>1GB</totalSizeCap>-->
</rollingPolicy>
<!--日志输出编码格式化-->
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="FILE"/>
<appender-ref ref="ERRORFILE"/>
</root>
</configuration>

View File

@ -77,7 +77,7 @@ public class OriginalUdpReceiver implements ApplicationListener<ApplicationReady
logger.info("原生方式启动");
try {
// Executors.newSingleThreadExecutor().execute(new UDPProcess("53.238.79.4",Integer.parseInt("10013")));
Executors.newSingleThreadExecutor().execute(new UDPProcess("localhost",Integer.parseInt("10013")));
Executors.newSingleThreadExecutor().execute(new UDPProcess("53.176.146.100",Integer.parseInt("10013")));
} catch (SocketException e) {
e.printStackTrace();
}

View File

@ -1,18 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="false">
<!-- 日志存放路径 -->
<property name="log.path" value="logs/${project.artifactId}" />
<!-- 日志输出格式 -->
<property name="log.path" value="logs/${project.artifactId}" />
<!-- 日志输出格式 -->
<property name="console.log.pattern"
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
<!-- 控制台输出 -->
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>
<!-- &lt;!&ndash; 控制台输出 &ndash;&gt;
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>-->
<include resource="logback-common.xml" />
@ -21,8 +21,66 @@
<!-- 开启 skywalking 日志收集 -->
<include resource="logback-skylog.xml" />
<!--系统操作日志-->
<root level="info">
<appender-ref ref="console" />
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Info 级别的日志,只是过滤 info 还是会输出 Error 日志,因为 Error 的级别高,
所以我们使用下面的策略,可以避免输出 Error 的日志-->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<!--过滤 Error-->
<level>ERROR</level>
<!--匹配到就禁止-->
<onMatch>DENY</onMatch>
<!--没有匹配到就允许-->
<onMismatch>ACCEPT</onMismatch>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}${LOG_FILE}</File>
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_PATH}info/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</fileNamePattern>
<maxFileSize>50MB</maxFileSize>
<maxHistory>20</maxHistory> <!-- 保留180天 -->
</rollingPolicy>
</appender>
<!--error log-->
<appender name="ERRORFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Error 级别的日志,那么需要过滤一下,默认是 info 级别的ThresholdFilter-->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>ERROR</level>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}error.${LOG_FILE}</File>
<!--滚动策略,按照时间滚动 TimeBasedRollingPolicy-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!--文件路径,定义了日志的切分方式——把每一天的日志归档到一个文件中,以防止日志填满整个磁盘空间-->
<FileNamePattern>${LOG_PATH}error/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</FileNamePattern>
<!--只保留最近180天的日志(与下方 maxHistory=180 一致)-->
<maxFileSize>50MB</maxFileSize>
<maxHistory>180</maxHistory>
<!--用来指定日志文件的上限大小,那么到了这个值,就会删除旧的日志-->
<!--<totalSizeCap>1GB</totalSizeCap>-->
</rollingPolicy>
<!--日志输出编码格式化-->
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="FILE"/>
<appender-ref ref="ERRORFILE"/>
</root>
</configuration>

View File

@ -0,0 +1,86 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="false">
<!-- 日志存放路径 -->
<property name="log.path" value="logs/${project.artifactId}" />
<!-- 日志输出格式 -->
<property name="console.log.pattern"
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
<!-- &lt;!&ndash; 控制台输出 &ndash;&gt;
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>-->
<include resource="logback-common.xml" />
<include resource="logback-logstash.xml" />
<!-- 开启 skywalking 日志收集 -->
<include resource="logback-skylog.xml" />
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Info 级别的日志,只是过滤 info 还是会输出 Error 日志,因为 Error 的级别高,
所以我们使用下面的策略,可以避免输出 Error 的日志-->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<!--过滤 Error-->
<level>ERROR</level>
<!--匹配到就禁止-->
<onMatch>DENY</onMatch>
<!--没有匹配到就允许-->
<onMismatch>ACCEPT</onMismatch>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}${LOG_FILE}</File>
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${LOG_PATH}info/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</fileNamePattern>
<maxFileSize>50MB</maxFileSize>
<maxHistory>20</maxHistory> <!-- 保留20天;NOTE(review): 原注释写"保留180天",与取值 20 不一致,请确认期望的保留天数 -->
</rollingPolicy>
</appender>
<!--error log-->
<appender name="ERRORFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Error 级别的日志,那么需要过滤一下,默认是 info 级别的ThresholdFilter-->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>ERROR</level>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${LOG_PATH}error.${LOG_FILE}</File>
<!--滚动策略,按照时间滚动 TimeBasedRollingPolicy-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!--文件路径,定义了日志的切分方式——把每一天的日志归档到一个文件中,以防止日志填满整个磁盘空间-->
<FileNamePattern>${LOG_PATH}error/${LOG_FILE}.%d{yyyy-MM-dd}.%i.gz</FileNamePattern>
<!--只保留最近180天的日志(与下方 maxHistory=180 一致)-->
<maxFileSize>50MB</maxFileSize>
<maxHistory>180</maxHistory>
<!--用来指定日志文件的上限大小,那么到了这个值,就会删除旧的日志-->
<!--<totalSizeCap>1GB</totalSizeCap>-->
</rollingPolicy>
<!--日志输出编码格式化-->
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="FILE"/>
<appender-ref ref="ERRORFILE"/>
</root>
</configuration>

View File

@ -40,9 +40,10 @@ spring.sql.init.platform=mysql
db.num=1
### Connect URL of DB:
db.url.0=jdbc:mysql://127.0.0.1:3306/ry-config?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC&allowPublicKeyRetrieval=true
#db.url.0=jdbc:mysql://127.0.0.1:3306/ry-config?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC&allowPublicKeyRetrieval=true
db.url.0=jdbc:mysql://53.176.146.98:3306/wzhj-config?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC&allowPublicKeyRetrieval=true
db.user.0=root
db.password.0=root
# NOTE(review): 数据库密码明文提交到版本库存在泄露风险,建议改用环境变量或配置中心注入
db.password.0=Ycgis!2509
### the maximum retry times for push
nacos.config.push.maxRetryTime=50