亳州位置汇聚

ds-bozhou
luyya 2025-07-05 15:14:40 +08:00
parent 83fac17fe1
commit ac0080fe7f
15 changed files with 190 additions and 176 deletions

View File

@ -1,87 +1,49 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="false">
<!-- 日志存放路径 -->
<property name="log.path" value="logs/${project.artifactId}" />
<property name="log.file" value="${project.artifactId}" />
<property name="log.path" value="logs" />
<property name="log.file" value="auth" />
<property name="MAX_FILE_SIZE" value="10MB" />
<property name="MAX_HISTORY" value="30" />
<!-- 日志输出格式 -->
<property name="console.log.pattern"
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
<!-- &lt;!&ndash; 控制台输出 &ndash;&gt;
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>${console.log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
</appender>-->
<include resource="logback-common.xml" />
<include resource="logback-logstash.xml" />
<!-- 开启 skywalking 日志收集 -->
<include resource="logback-skylog.xml" />
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Info 级别的日志,只是过滤 info 还是会输出 Error 日志,因为 Error 的级别高,
所以我们使用下面的策略,可以避免输出 Error 的日志-->
<!-- INFO日志Appender -->
<appender name="FILE_INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/info.${log.file}.log</file>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<!--过滤 Error-->
<level>ERROR</level>
<!--匹配到就禁止-->
<onMatch>DENY</onMatch>
<!--没有匹配到就允许-->
<onMismatch>ACCEPT</onMismatch>
<level>INFO</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${log.path}${log.file}</File>
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.path}info/${log.file}.%d{yyyy-MM-dd}.%i.gz</fileNamePattern>
<maxFileSize>50MB</maxFileSize>
<maxHistory>20</maxHistory> <!-- 保留180天 -->
<fileNamePattern>${log.path}/info/info.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
<maxHistory>${MAX_HISTORY}</maxHistory>
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
</appender>
<!--error log-->
<appender name="ERRORFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Error 级别的日志,那么需要过滤一下,默认是 info 级别的ThresholdFilter-->
<!-- ERROR日志Appender -->
<appender name="FILE_ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/error.${log.file}.log</file>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>Error</level>
<level>ERROR</level>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${log.path}error.${log.file}</File>
<!--滚动策略,按照时间滚动 TimeBasedRollingPolicy-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!--文件路径,定义了日志的切分方式——把每一天的日志归档到一个文件中,以防止日志填满整个磁盘空间-->
<FileNamePattern>${log.path}error/${log.file}.%d{yyyy-MM-dd}.%i.gz</FileNamePattern>
<!--只保留最近90天的日志-->
<maxFileSize>50MB</maxFileSize>
<maxHistory>180</maxHistory>
<!--用来指定日志文件的上限大小,那么到了这个值,就会删除旧的日志-->
<!--<totalSizeCap>1GB</totalSizeCap>-->
<fileNamePattern>${log.path}/error/error.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
<maxHistory>${MAX_HISTORY}</maxHistory>
</rollingPolicy>
<!--日志输出编码格式化-->
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
</appender>
<!-- 根Logger配置禁用控制台输出 -->
<root level="INFO">
<appender-ref ref="CONSOLE"/>
<appender-ref ref="FILE"/>
<appender-ref ref="ERRORFILE"/>
<appender-ref ref="FILE_INFO" />
<appender-ref ref="FILE_ERROR" />
</root>
</configuration>

View File

@ -23,7 +23,7 @@ dubbo:
address: redis://${spring.data.redis.host}:${spring.data.redis.port}
group: DUBBO_GROUP
username: dubbo
password: ruoyi123
password: Ycgis@2509
# 集群开关
sentinel: false
parameters:

View File

@ -112,32 +112,26 @@ public class IndexStaticsController extends BaseController {
* 线
* */
@PostMapping("/onLineBar")
public R onLineBar(){
List<SysDeptVo> deptVoList = deptService.getDsList();
List<DeviceStaticsVo> staticsVoList = deviceService.countByDs();
public R onLineBar(@RequestBody TDeviceBo bo){
SysDeptBo deptBo = new SysDeptBo();
deptBo.setIsVisible("1");
deptBo.setParentId(bo.getZzjgdm());
List<SysDeptVo> deptVoList = deptService.selectDeptList(deptBo);
List<DeviceStaticsVo> list = new ArrayList<>(); //用来接收处理后的统计结果
for (SysDeptVo deptVo : deptVoList) {
boolean bl = false; //用来统计结果是否有当前这个机构
for (DeviceStaticsVo staticsVo : staticsVoList) {
String deptId = staticsVo.getZzjgdm()+"00000000";
if (deptId.equals(deptVo.getDeptId())){
staticsVo.setZzjgdm(deptId);
staticsVo.setZzjgmc(deptVo.getDeptName().replaceAll("公安局",""));
int onlineCo = RedisUtils.searchKeys("org_code:"+staticsVo.getZzjgdm()+"*");
staticsVo.setOnlineCo(onlineCo);
list.add(staticsVo);
bl = true;
break;
}
}
if (!bl){
DeviceStaticsVo staticsVo = new DeviceStaticsVo();
staticsVo.setZzjgdm(deptVo.getDeptId());
staticsVo.setZzjgmc(deptVo.getDeptName().replaceAll("公安局",""));
staticsVo.setCo(0);
staticsVo.setOnlineCo(0);
list.add(staticsVo);
}
DeviceStaticsVo staticsVo = new DeviceStaticsVo();
TDeviceBo deviceBo = new TDeviceBo();
deviceBo.setZzjgdm(deptVo.getDeptId());
deviceBo.setValid(1);
DeviceRedis redis = new DeviceRedis();
redis.setZzjgdm(deptVo.getDeptId());
Long co = deviceService.countByCondition(deviceBo);
Long online = redisService.countByZzjgdm(redis);
staticsVo.setZzjgdm(deptVo.getDeptId());
staticsVo.setZzjgmc(deptVo.getShortName());
staticsVo.setCo(co);
staticsVo.setOnlineCo(online);
list.add(staticsVo);
}
return R.ok(list);
}

View File

@ -15,8 +15,8 @@ public class DeviceStaticsVo implements Serializable {
private String zzjgdm;
private Integer co;
private Long co;
private Integer onlineCo;
private Long onlineCo;
}

View File

@ -1,5 +1,8 @@
package org.dromara.system.mapper;
import com.baomidou.mybatisplus.core.conditions.Wrapper;
import com.baomidou.mybatisplus.core.toolkit.Constants;
import org.apache.ibatis.annotations.Param;
import org.dromara.common.mybatis.core.mapper.BaseMapperPlus;
import org.dromara.system.domain.DeviceRedis;
import org.dromara.system.domain.vo.DeviceRedisVo;
@ -11,4 +14,6 @@ public interface DeviceRedisMapper extends BaseMapperPlus<DeviceRedis,DeviceRedi
int insertBatch(List<DeviceRedis> list);
List<DeviceRedisVo> countByCondition(DeviceRedis redis);
Long countByzzjgdm(@Param(Constants.WRAPPER) Wrapper<DeviceRedis> queryWrapper);
}

View File

@ -69,4 +69,21 @@ public class DeviceRedisSchedule {
RedisUtils.batchInsert(deviceInfoDataMap,-1);
}
/*
* 10
* */
// @Scheduled(cron = "0 0 0/1 * * ?")
public void deleteZfjly(){
deviceService.deleteZfjly();
}
/*
* 线
* */
// @Scheduled(cron = "0 0 0/1 * * ?")
public void removeRedis(){
deviceService.deleteRedis();
}
}

View File

@ -9,4 +9,6 @@ public interface IDeviceRedisService {
int insertBatch(List<DeviceRedis> list);
List<DeviceRedisVo> countByCondition(DeviceRedis redis);
Long countByZzjgdm(DeviceRedis deviceRedis);
}

View File

@ -85,4 +85,8 @@ public interface ITDeviceService {
TDeviceVo queryByDeviceCode(String deviceCode);
List<TDeviceExportVo> selectDeviceExportList(TDeviceBo bo);
int deleteZfjly();
int deleteRedis();
}

View File

@ -1,15 +1,20 @@
package org.dromara.system.service.impl;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.dubbo.config.annotation.DubboReference;
import org.apache.ibatis.session.ExecutorType;
import org.apache.ibatis.session.SqlSession;
import org.apache.ibatis.session.SqlSessionFactory;
import org.dromara.system.api.RemoteDataScopeService;
import org.dromara.system.domain.DeviceRedis;
import org.dromara.system.domain.TDevice;
import org.dromara.system.domain.vo.DeviceRedisVo;
import org.dromara.system.mapper.DeviceRedisMapper;
import org.dromara.system.service.IDeviceRedisService;
import org.dromara.system.service.ISysDeptService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
@ -23,6 +28,9 @@ public class DeviceRedisServiceImpl implements IDeviceRedisService {
private final DeviceRedisMapper baseMapper;
@DubboReference
private RemoteDataScopeService remoteDataScopeService;
@Autowired
SqlSessionFactory sqlSessionFactory;
@ -61,4 +69,13 @@ public class DeviceRedisServiceImpl implements IDeviceRedisService {
public List<DeviceRedisVo> countByCondition(DeviceRedis redis) {
return baseMapper.countByCondition(redis);
}
/**
 * Counts t_device_redis rows (excluding deviceType "9") restricted to the
 * data scope returned by remoteDataScopeService.getDeptAndChild — presumably
 * the given organisation code plus all of its child departments.
 */
@Override
public Long countByZzjgdm(DeviceRedis deviceRedis) {
    // Resolve the department scope first: an SQL fragment consumed via inSql.
    String deptScopeSql = remoteDataScopeService.getDeptAndChild(deviceRedis.getZzjgdm());
    LambdaQueryWrapper<DeviceRedis> wrapper = new LambdaQueryWrapper<>();
    wrapper.ne(DeviceRedis::getDeviceType, "9");
    wrapper.inSql(DeviceRedis::getZzjgdm, deptScopeSql);
    return baseMapper.countByzzjgdm(wrapper);
}
}

View File

@ -82,8 +82,8 @@ public class SysDeptServiceImpl implements ISysDeptService {
lqw.eq(StringUtils.isNotBlank(bo.getParentId()), SysDept::getParentId, bo.getParentId());
lqw.like(StringUtils.isNotBlank(bo.getDeptName()), SysDept::getDeptName, bo.getDeptName());
lqw.eq(StringUtils.isNotBlank(bo.getStatus()), SysDept::getStatus, bo.getStatus());
lqw.eq(StringUtils.isNotBlank(bo.getIsVisible()),SysDept::getIsVisible,bo.getIsVisible());
lqw.eq(StringUtils.isNotBlank(bo.getFullName()), SysDept::getFullName, bo.getFullName());
lqw.eq(StringUtils.isNotBlank(bo.getIsVisible()),SysDept::getIsVisible,bo.getIsVisible());
lqw.orderByAsc(SysDept::getAncestors);
lqw.orderByAsc(SysDept::getParentId);
lqw.orderByAsc(SysDept::getOrderNum);
@ -369,7 +369,7 @@ public class SysDeptServiceImpl implements ISysDeptService {
@Override
public List<SysDeptVo> deviceStatics(String deviceType,String manageDeptId) {
if(!manageDeptId.equals("341300000000")){
if(!manageDeptId.equals("341600000000")){
String subManageId = manageDeptId.substring(0,findLastNonZeroIndex(manageDeptId) + 1);
return baseMapper.deviceStaticsByDeptId(deviceType,subManageId);

View File

@ -14,9 +14,14 @@ import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.toolkit.Wrappers;
import lombok.RequiredArgsConstructor;
import org.dromara.common.redis.utils.RedisUtils;
import org.dromara.system.api.RemoteDataScopeService;
import org.dromara.system.domain.DeviceRedis;
import org.dromara.system.domain.vo.DeviceStaticsVo;
import org.dromara.system.domain.vo.TDeviceExportVo;
import org.dromara.system.mapper.DeviceRedisMapper;
import org.dromara.system.mapper.SysConfigMapper;
import org.dromara.system.service.ISysConfigService;
import org.springframework.stereotype.Service;
import org.dromara.system.domain.bo.TDeviceBo;
import org.dromara.system.domain.vo.TDeviceVo;
@ -43,6 +48,12 @@ public class TDeviceServiceImpl implements ITDeviceService {
private final TDeviceMapper baseMapper;
private final ISysConfigService configService;
private final DeviceRedisMapper redisMapper;
private String lastRemoveTime;
@DubboReference
private RemoteDataScopeService remoteDataScopeService;
@ -120,6 +131,53 @@ public class TDeviceServiceImpl implements ITDeviceService {
return baseMapper.selectDeviceExportList(lqw);
}
/*
 * Soft-deletes law-enforcement recorder devices (device types 5/7/8) whose
 * updateTime is older than the threshold configured under the sys_config key
 * "device.remove.day".
 *
 * @return the number of devices marked invalid (valid -> 0)
 */
@Override
public int deleteZfjly() {
    String day = configService.selectConfigByKey("device.remove.day");
    LambdaQueryWrapper<TDevice> lqw = new LambdaQueryWrapper<>();
    lqw.inSql(TDevice::getDeviceType,"'5','7','8'");
    lqw.eq(TDevice::getValid,"1");
    // BUG FIX: Integer.getInteger(day) looks up a JVM *system property* whose
    // name is the config value (almost always null -> NullPointerException on
    // unboxing). parseInt converts the config value itself.
    // NOTE(review): offsetDay adds the value to "now"; this only selects stale
    // devices if the configured value is negative (e.g. "-30") — confirm the
    // sys_config convention.
    lqw.le(TDevice::getUpdateTime, DateUtil.offsetDay(new Date(), Integer.parseInt(day)));
    List<TDevice> list = baseMapper.selectList(lqw);
    for (TDevice device : list) {
        device.setValid(0);
        device.setUpdateTime(DateUtil.now());
        baseMapper.updateById(device);
    }
    // Return the number of rows invalidated instead of a constant 0 so the
    // scheduler can log a meaningful result (return type unchanged).
    return list.size();
}
/*
 * Purges online-state records for invalidated devices: for every TDevice with
 * valid = 0 updated since the last run, deletes its Redis "online_users" key
 * and its matching row in t_device_redis.
 *
 * Incremental: `lastRemoveTime` (in-memory field) remembers the newest
 * updateTime processed, so subsequent runs only scan devices invalidated
 * after it. The watermark is lost on restart, causing one full re-scan —
 * deletes are idempotent so that is harmless.
 */
@Override
public int deleteRedis() {
LambdaQueryWrapper<TDevice> lqw = new LambdaQueryWrapper<>();
// On the first run lastRemoveTime is blank, so the gt condition is skipped
// and ALL invalid devices are scanned.
lqw.gt(StringUtils.isNotBlank(lastRemoveTime),TDevice::getUpdateTime,lastRemoveTime);
lqw.eq(TDevice::getValid,0);
// Newest first so list.get(0) holds the watermark for the next run.
lqw.orderByDesc(TDevice::getUpdateTime);
List<TDeviceVo> list = baseMapper.selectVoList(lqw);
try {
if (list.size()>0){
// Advance the watermark before processing; if a delete below fails the
// skipped devices will NOT be retried next run — see review note below.
lastRemoveTime = list.get(0).getUpdateTime();
for (TDeviceVo deviceVo : list) {
// Remove the device's online-presence key from Redis.
RedisUtils.del("online_users:"+deviceVo.getDeviceType()+":"+deviceVo.getDeviceCode());
// Remove the mirrored row(s) from t_device_redis.
LambdaQueryWrapper<DeviceRedis> dqw = new LambdaQueryWrapper<>();
dqw.eq(DeviceRedis::getDeviceCode,deviceVo.getDeviceCode());
redisMapper.delete(dqw);
}
}
// NOTE(review): exceptions are swallowed silently AND the watermark has
// already advanced, so a mid-loop failure permanently skips the remaining
// devices. At minimum this should be logged — confirm intent.
}catch (Exception e){
}
// Always returns 0 regardless of how many records were removed.
return 0;
}
@Override
public TDeviceVo queryOne(TDeviceBo bo) {
LambdaQueryWrapper<TDevice> lqw = buildQueryWrapper(bo);

View File

@ -46,4 +46,9 @@
WHERE d.dict_type = 'zd_device_type'
</select>
<select id="countByzzjgdm" resultType="Long">
select count(*) from t_device_redis
${ew.getCustomSqlSegment}
</select>
</mapper>

View File

@ -49,8 +49,8 @@
#{remark1},#{remark2},#{cardNum},#{createTime},#{updateTime})
ON DUPLICATE KEY UPDATE
police_no = values(police_no),police_name = values(police_name),phone_num = values(phone_num),
car_num = values(car_num),valid = values(valid),remark1 = values(remark1),remark2 = values(remark2),card_num = values(card_num),
update_time = now()
car_num = values(car_num),valid = '1',remark1 = values(remark1),remark2 = values(remark2),card_num = values(card_num),
update_time = values(update_time)
</insert>
</mapper>

View File

@ -43,7 +43,7 @@ db.num=1
#db.url.0=jdbc:mysql://127.0.0.1:3306/ry-config?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC&allowPublicKeyRetrieval=true
db.url.0=jdbc:mysql://10.129.221.10:3306/wzhj-config?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC&allowPublicKeyRetrieval=true
db.user.0=root
db.password.0=Ycgis!2509
db.password.0=Ycgis@2509
### the maximum retry times for push
nacos.config.push.maxRetryTime=50

View File

@ -1,99 +1,49 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds" debug="false">
<!-- 日志存放路径 -->
<property name="log.path" value="logs/${project.artifactId}" />
<property name="log.file" value="${project.artifactId}" />
<property name="log.path" value="logs" />
<property name="log.file" value="gateway" />
<property name="MAX_FILE_SIZE" value="10MB" />
<property name="MAX_HISTORY" value="30" />
<!-- 日志输出格式 -->
<property name="console.log.pattern"
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
<!-- 控制台输出 -->
<appender name="file_console" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/console.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- 日志文件名格式 -->
<fileNamePattern>${log.path}/console.%d{yyyy-MM-dd}.log</fileNamePattern>
<!-- 日志最大 1天 -->
<maxHistory>1</maxHistory>
</rollingPolicy>
<encoder>
<pattern>${log.pattern}</pattern>
<charset>utf-8</charset>
</encoder>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<!-- 过滤的级别 -->
<level>INFO</level>
</filter>
</appender>
<appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Info 级别的日志,只是过滤 info 还是会输出 Error 日志,因为 Error 的级别高,
所以我们使用下面的策略,可以避免输出 Error 的日志-->
<!-- INFO日志Appender -->
<appender name="FILE_INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/info.${log.file}.log</file>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<!--过滤 Error-->
<level>ERROR</level>
<!--匹配到就禁止-->
<onMatch>DENY</onMatch>
<!--没有匹配到就允许-->
<onMismatch>ACCEPT</onMismatch>
<level>INFO</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${log.path}${log.file}</File>
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.path}info/${log.file}.%d{yyyy-MM-dd}.%i.gz</fileNamePattern>
<maxFileSize>50MB</maxFileSize>
<maxHistory>20</maxHistory> <!-- 保留180天 -->
<fileNamePattern>${log.path}/info/info.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
<maxHistory>${MAX_HISTORY}</maxHistory>
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
</appender>
<!--error log-->
<appender name="ERRORFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--如果只是想要 Error 级别的日志,那么需要过滤一下,默认是 info 级别的ThresholdFilter-->
<!-- ERROR日志Appender -->
<appender name="FILE_ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/error.${log.file}.log</file>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>Error</level>
<level>ERROR</level>
</filter>
<!--日志名称如果没有File 属性那么只会使用FileNamePattern的文件路径规则
如果同时有<File><FileNamePattern>,那么当天日志是<File>,明天会自动把今天
的日志改名为今天的日期。即,<File> 的日志都是当天的。
-->
<File>${log.path}error.${log.file}</File>
<!--滚动策略,按照时间滚动 TimeBasedRollingPolicy-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!--文件路径,定义了日志的切分方式——把每一天的日志归档到一个文件中,以防止日志填满整个磁盘空间-->
<FileNamePattern>${log.path}error/${log.file}.%d{yyyy-MM-dd}.%i.gz</FileNamePattern>
<!--只保留最近90天的日志-->
<maxFileSize>50MB</maxFileSize>
<maxHistory>180</maxHistory>
<!--用来指定日志文件的上限大小,那么到了这个值,就会删除旧的日志-->
<!--<totalSizeCap>1GB</totalSizeCap>-->
<fileNamePattern>${log.path}/error/error.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
<maxHistory>${MAX_HISTORY}</maxHistory>
</rollingPolicy>
<!--日志输出编码格式化-->
<encoder>
<charset>UTF-8</charset>
<pattern>%date [%level] [%thread] %logger{60} [%file : %line] %msg%n</pattern>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
</appender>
<include resource="logback-logstash.xml" />
<!-- 开启 skywalking 日志收集 -->
<include resource="logback-skylog.xml" />
<!-- 根Logger配置禁用控制台输出 -->
<root level="INFO">
<appender-ref ref="console"/>
<appender-ref ref="FILE"/>
<appender-ref ref="ERRORFILE"/>
<appender-ref ref="file_console"/>
<appender-ref ref="FILE_INFO" />
<appender-ref ref="FILE_ERROR" />
</root>
</configuration>