省厅位置汇聚数据定时任务修改 可配置

stwzhj
luyya 2025-11-21 09:24:56 +08:00
parent 7cb4a65d27
commit 659a894d67
6 changed files with 125 additions and 74 deletions

View File

@ -53,6 +53,11 @@
<artifactId>stwzhj-common-mybatis</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-job</artifactId>
</dependency>
<dependency>
<groupId>org.dromara</groupId>
<artifactId>stwzhj-common-dubbo</artifactId>

View File

@ -1,56 +0,0 @@
package org.dromara.data2es.schedule;
import cn.hutool.core.bean.BeanUtil;
import cn.hutool.core.date.DateUnit;
import cn.hutool.core.date.DateUtil;
import cn.hutool.json.JSONObject;
import org.dromara.common.redis.utils.RedisUtils;
import org.dromara.data2es.domain.EsGpsInfoVO2;
import org.dromara.data2es.service.IGpsService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.Scheduled;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
/**
* <p>description: </p>
*
* @author chenle
* @date 2021-05-18 18:23
*/
@Configuration
public class RedisOnlineUserSchedule {
// Service used to persist the offline-status updates (project-local; Elasticsearch-backed per package name — TODO confirm).
@Autowired
IGpsService gpsService;
// Runs every 20 minutes: scans all "online_users:*" entries in Redis and
// flags devices whose last GPS fix is older than 30 minutes as offline.
@Scheduled(cron = "0 0/20 * * * ?")
public void redisTimeOutRemove(){
List<JSONObject> jlist = RedisUtils.searchAndGetKeysValues("online_users:*");
List<EsGpsInfoVO2> gpsInfoVO2s = new ArrayList<>();
for (JSONObject job : jlist) {
String deviceType = job.getStr("deviceType");
// Device type "05" is exempt from timeout handling here
// (NOTE(review): semantics of type "05" not visible in this file — confirm).
if ("05".equals(deviceType)){
continue;
}
// NOTE(review): getInt may return null when the field is absent; the
// unboxing in the comparison below would then throw an NPE — verify
// every "online_users:*" entry always carries an "online" field.
Integer online = job.getInt("online");
// Already offline — nothing to update.
if (0 == online){
continue;
}
EsGpsInfoVO2 vo2 = BeanUtil.toBean(job, EsGpsInfoVO2.class);
// 1800 s = 30 min without a GPS update -> mark the device offline.
if (1 == vo2.getOnline() && DateUtil.between(vo2.getGpsTime(), new Date(), DateUnit.SECOND) > 1800L){
vo2.setOnline(0);
gpsInfoVO2s.add(vo2);
}
}
// Only hit the service when there is at least one device to flip offline.
if (gpsInfoVO2s.size() > 0){
gpsService.updateDataStatus(gpsInfoVO2s);
}
}
}

View File

@ -0,0 +1,83 @@
package org.dromara.data2es.schedule;
import cn.hutool.core.bean.BeanUtil;
import cn.hutool.core.date.DateUnit;
import cn.hutool.core.date.DateUtil;
import cn.hutool.json.JSONObject;
import com.aizuda.snailjob.client.job.core.annotation.JobExecutor;
import com.aizuda.snailjob.client.job.core.dto.JobArgs;
import com.aizuda.snailjob.client.model.ExecuteResult;
import com.aizuda.snailjob.common.core.model.JobContext;
import com.aizuda.snailjob.common.log.SnailJobLog;
import lombok.extern.slf4j.Slf4j;
import org.dromara.common.redis.utils.RedisUtils;
import org.dromara.data2es.domain.EsGpsInfoVO2;
import org.dromara.data2es.service.IGpsService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
/**
* <p>description: </p>
*
*/
@Slf4j
@Component
@JobExecutor(name = "redisOnlineUserTimeoutRemove") // executor name — must match the SnailJob console configuration
public class RedisOnlineUserTimeoutExecutor {

    /** Offline threshold for deviceType "05": 24 hours, in seconds. */
    private static final long TYPE_05_TIMEOUT_SECONDS = 86400L;

    /** Offline threshold for every other device type: 30 minutes, in seconds. */
    private static final long DEFAULT_TIMEOUT_SECONDS = 1800L;

    /** Service used to persist the offline-status updates. */
    @Autowired
    private IGpsService gpsService;

    /**
     * SnailJob entry point: scans all {@code online_users:*} entries in Redis
     * and marks devices whose last GPS fix is older than the per-device-type
     * timeout as offline (online = 0), persisting the changes via
     * {@link IGpsService#updateDataStatus}.
     *
     * @param jobArgs arguments supplied by the SnailJob scheduler (currently unused)
     * @return success with a summary message, or failure carrying the exception message
     */
    public ExecuteResult jobExecute(JobArgs jobArgs) {
        try {
            SnailJobLog.LOCAL.info("开始执行 Redis 在线用户超时清理任务");
            List<JSONObject> jlist = RedisUtils.searchAndGetKeysValues("online_users:*");
            List<EsGpsInfoVO2> gpsInfoVO2s = new ArrayList<>();
            for (JSONObject job : jlist) {
                Integer online = job.getInt("online");
                // Skip entries already offline. The null guard also protects the
                // unboxing: a missing "online" field previously threw an NPE here.
                if (online == null || online == 0) {
                    continue;
                }
                EsGpsInfoVO2 vo2 = BeanUtil.toBean(job, EsGpsInfoVO2.class);
                // Device type "05" gets a 24h grace period, everything else 30 min
                // (NOTE(review): semantics of type "05" not visible here — confirm).
                long timeoutSeconds = "05".equals(job.getStr("deviceType"))
                        ? TYPE_05_TIMEOUT_SECONDS
                        : DEFAULT_TIMEOUT_SECONDS;
                if (1 == vo2.getOnline()
                        && DateUtil.between(vo2.getGpsTime(), new Date(), DateUnit.SECOND) > timeoutSeconds) {
                    vo2.setOnline(0);
                    gpsInfoVO2s.add(vo2);
                }
            }
            if (!gpsInfoVO2s.isEmpty()) {
                gpsService.updateDataStatus(gpsInfoVO2s);
                String msg = "成功更新 " + gpsInfoVO2s.size() + " 个超时设备为离线状态";
                SnailJobLog.LOCAL.info(msg);
                return ExecuteResult.success(msg);
            } else {
                SnailJobLog.LOCAL.info("无超时设备需要处理");
                return ExecuteResult.success("无超时设备");
            }
        } catch (Exception e) {
            String errorMsg = "Redis在线用户清理任务异常: " + e.getMessage();
            SnailJobLog.LOCAL.error(errorMsg, e);
            return ExecuteResult.failure(errorMsg);
        }
    }
}

View File

@ -124,7 +124,7 @@ public class GpsServiceImpl implements IGpsService {
requestHandler.sendToKafka(info);
}
requestHandler.redisOnlineUserBatch(onlineUserDataMap, 864000); //存放30天
requestHandler.redisOnlineUserBatch(onlineUserDataMap, 864000); //存放10天
requestHandler.redisOnlineUserBatch(orgCodeDataMap, 1800); //此处和buildRedisMap方法判断在线的时间一直
// requestHandler.redisDeleteBatch(deleteKeys); //此处换成了数据库的模式 所以这个就不用了

View File

@ -46,7 +46,7 @@ public class DeviceRedisSchedule {
}
@PostConstruct
@Scheduled(cron = "0 0 0/2 * * ?")
@Scheduled(cron = "0 0 0/1 * * ?")
public void handleDeviceInfoToRedis(){
if (null == lastUpdateTime || "".equals(lastUpdateTime)){
log.error("lastUpdateTime=null");

View File

@ -1,26 +1,44 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="false">
<!-- 日志存放路径 -->
<property name="log.path" value="logs/${project.artifactId}"/>
<property name="log.path" value="logs" />
<property name="log.file" value="job" />
<property name="MAX_FILE_SIZE" value="50MB" />
<property name="MAX_HISTORY" value="30" />
<!-- 日志输出格式 -->
<property name="console.log.pattern"
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
<!-- 控制台输出 -->
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<!-- INFO日志Appender -->
<appender name="FILE_INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/info.${log.file}.log</file>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>INFO</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.path}/info/info.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
<maxHistory>${MAX_HISTORY}</maxHistory>
</rollingPolicy>
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
</appender>
<include resource="logback-common.xml" />
<include resource="logback-logstash.xml" />
<!-- 开启 skywalking 日志收集 -->
<include resource="logback-skylog.xml" />
<!-- ERROR日志Appender -->
<appender name="FILE_ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/error.${log.file}.log</file>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>ERROR</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.path}/error/error.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
<maxHistory>${MAX_HISTORY}</maxHistory>
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
</appender>
<!-- SnailJob appender -->
<appender name="snail_log_server_appender" class="com.aizuda.snailjob.server.common.appender.SnailJobServerLogbackAppender">
@ -28,7 +46,8 @@
<!--系统操作日志-->
<root level="info">
<appender-ref ref="console"/>
<appender-ref ref="FILE_INFO" />
<appender-ref ref="FILE_ERROR" />
<appender-ref ref="snail_log_server_appender" />
</root>