Compare commits
33 Commits
| Author | SHA1 | Date |
|---|---|---|
|
|
7b3586d48c | |
|
|
6b5aa718d2 | |
|
|
a8f0b96a75 | |
|
|
ddc8d4a8bd | |
|
|
c1152bf7d8 | |
|
|
fd95d19e9c | |
|
|
45167e5874 | |
|
|
8364d79d05 | |
|
|
3de061a843 | |
|
|
1b0876d735 | |
|
|
09e390b79d | |
|
|
7346030a2f | |
|
|
e5d32cce80 | |
|
|
61b89e502b | |
|
|
659a894d67 | |
|
|
7cb4a65d27 | |
|
|
13f46756c8 | |
|
|
e5a166d4a3 | |
|
|
16f8b6dec5 | |
|
|
81187efdbd | |
|
|
3bd524964b | |
|
|
a01837bf6d | |
|
|
87882579c7 | |
|
|
769bcc6d9f | |
|
|
c22bae090e | |
|
|
549e085df8 | |
|
|
bd7fbfbde4 | |
|
|
1f906ebb39 | |
|
|
caf0ee6c5b | |
|
|
5f9ecaa366 | |
|
|
91a54e2d26 | |
|
|
040885e507 | |
|
|
8febeeb5e4 |
6
pom.xml
6
pom.xml
|
|
@ -89,12 +89,12 @@
|
|||
<id>prod</id>
|
||||
<properties>
|
||||
<profiles.active>prod</profiles.active>
|
||||
<nacos.server>127.0.0.1:8848</nacos.server>
|
||||
<nacos.server>53.16.17.13:8848</nacos.server>
|
||||
<nacos.discovery.group>DEFAULT_GROUP</nacos.discovery.group>
|
||||
<nacos.config.group>DEFAULT_GROUP</nacos.config.group>
|
||||
<nacos.username>nacos</nacos.username>
|
||||
<nacos.password>nacos</nacos.password>
|
||||
<logstash.address>127.0.0.1:4560</logstash.address>
|
||||
<nacos.password>Ycgis!2509</nacos.password>
|
||||
<logstash.address>53.16.17.13:4560</logstash.address>
|
||||
</properties>
|
||||
</profile>
|
||||
</profiles>
|
||||
|
|
|
|||
|
|
@ -15,8 +15,12 @@ public interface RemoteDeviceService {
|
|||
|
||||
List<RemoteDeviceVo> deviceList(RemoteDeviceBo bo);
|
||||
|
||||
List<RemoteDeviceVo> deviceListPage(RemoteDeviceBo bo);
|
||||
|
||||
R saveDeviceToSt(String infoSource, List<RemoteDeviceBo> list);
|
||||
|
||||
RemoteDeviceVo getDeviceInfo(String deviceCode,String deviceType);
|
||||
|
||||
Long deviceCount();
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -88,6 +88,10 @@ public class RemoteDeviceBo implements Serializable {
|
|||
*/
|
||||
private String remark2;
|
||||
|
||||
private String gbbm;
|
||||
|
||||
private String tdbm;
|
||||
|
||||
private String lrdwdm;
|
||||
|
||||
private String lrdwmc;
|
||||
|
|
|
|||
|
|
@ -104,4 +104,8 @@ public class RemoteDeviceVo implements Serializable {
|
|||
private String xgrxm;
|
||||
|
||||
private String xgrsfzh;
|
||||
|
||||
private String gbbm;
|
||||
|
||||
private String tdbm;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -130,6 +130,8 @@ public class LoginUser implements Serializable {
|
|||
*/
|
||||
private String deviceType;
|
||||
|
||||
private String manageDeptId;
|
||||
|
||||
/**
|
||||
* 获取登录id
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -1,28 +1,49 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<configuration scan="true" scanPeriod="60 seconds" debug="false">
|
||||
<!-- 日志存放路径 -->
|
||||
<property name="log.path" value="logs/${project.artifactId}"/>
|
||||
<property name="log.path" value="logs" />
|
||||
<property name="log.file" value="auth" />
|
||||
<property name="MAX_FILE_SIZE" value="50MB" />
|
||||
<property name="MAX_HISTORY" value="30" />
|
||||
<!-- 日志输出格式 -->
|
||||
<property name="console.log.pattern"
|
||||
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
|
||||
|
||||
<!-- 控制台输出 -->
|
||||
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<!-- INFO日志Appender -->
|
||||
<appender name="FILE_INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.path}/info.${log.file}.log</file>
|
||||
<filter class="ch.qos.logback.classic.filter.LevelFilter">
|
||||
<level>INFO</level>
|
||||
<onMatch>ACCEPT</onMatch>
|
||||
<onMismatch>DENY</onMismatch>
|
||||
</filter>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.path}/info/info.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
|
||||
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
|
||||
<maxHistory>${MAX_HISTORY}</maxHistory>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>${console.log.pattern}</pattern>
|
||||
<charset>utf-8</charset>
|
||||
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<include resource="logback-common.xml" />
|
||||
<!-- ERROR日志Appender -->
|
||||
<appender name="FILE_ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.path}/error.${log.file}.log</file>
|
||||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||
<level>ERROR</level>
|
||||
</filter>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.path}/error/error.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
|
||||
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
|
||||
<maxHistory>${MAX_HISTORY}</maxHistory>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<include resource="logback-logstash.xml" />
|
||||
|
||||
<!-- 开启 skywalking 日志收集 -->
|
||||
<include resource="logback-skylog.xml" />
|
||||
|
||||
<!--系统操作日志-->
|
||||
<root level="info">
|
||||
<appender-ref ref="console"/>
|
||||
<!-- 根Logger配置(禁用控制台输出) -->
|
||||
<root level="INFO">
|
||||
<appender-ref ref="FILE_INFO" />
|
||||
<appender-ref ref="FILE_ERROR" />
|
||||
</root>
|
||||
|
||||
</configuration>
|
||||
|
|
|
|||
|
|
@ -17,11 +17,14 @@ public class RedisConstants {
|
|||
|
||||
public static final String ONLINE_USERS_GEO = "geo_hash";
|
||||
|
||||
public static final long REDIS_ONLINE_USER_EXPIRE_TIME = 60 * 60 * 24;
|
||||
public static final long REDIS_NEVER_EXPIRE = 0L;
|
||||
|
||||
public static final long FIVE_MINUTES_REDIS_ONLINE_USER_EXPIRE_TIME = 60 * 5;
|
||||
|
||||
public static final String ONLINE_USERS_TEN = "ten:online_users:";
|
||||
|
||||
public static final long REDIS_ONLINE_USER_NEVER_EXPIRE = -1;
|
||||
|
||||
public static final long FIVE_MINUTES_REDIS_ONLINE_USER_EXPIRE_TIME = 60 * 5;
|
||||
|
||||
|
||||
public static String getUserTokenKey(String token) {
|
||||
|
|
|
|||
|
|
@ -49,10 +49,15 @@ import static org.apache.dubbo.metadata.report.support.Constants.DEFAULT_METADAT
|
|||
public class RedisMetadataReport extends AbstractMetadataReport {
|
||||
|
||||
private static final String REDIS_DATABASE_KEY = "database";
|
||||
|
||||
private static final String SENTINEL_KEY = "sentinel";
|
||||
private static final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(RedisMetadataReport.class);
|
||||
|
||||
// protected , for test
|
||||
protected JedisPool pool;
|
||||
|
||||
protected JedisSentinelPool sentinelPool;
|
||||
|
||||
private Set<HostAndPort> jedisClusterNodes;
|
||||
private int timeout;
|
||||
private String password;
|
||||
|
|
@ -75,6 +80,14 @@ public class RedisMetadataReport extends AbstractMetadataReport {
|
|||
for (URL tmpUrl : urls) {
|
||||
jedisClusterNodes.add(new HostAndPort(tmpUrl.getHost(), tmpUrl.getPort()));
|
||||
}
|
||||
} else if (url.getParameter(SENTINEL_KEY,false)) {
|
||||
Set<String> sentinels = new HashSet<>();
|
||||
List<URL> urls = url.getBackupUrls();
|
||||
for (URL tmpUrl : urls) {
|
||||
sentinels.add(tmpUrl.getHost()+":"+ tmpUrl.getPort());
|
||||
}
|
||||
int database = url.getParameter(REDIS_DATABASE_KEY, 0);
|
||||
sentinelPool = new JedisSentinelPool("mymaster",sentinels ,new GenericObjectPoolConfig<>(), timeout, password, database);
|
||||
} else {
|
||||
int database = url.getParameter(REDIS_DATABASE_KEY, 0);
|
||||
pool = new JedisPool(new JedisPoolConfig(), url.getHost(), url.getPort(), timeout, password, database);
|
||||
|
|
@ -128,11 +141,25 @@ public class RedisMetadataReport extends AbstractMetadataReport {
|
|||
private void storeMetadata(BaseMetadataIdentifier metadataIdentifier, String v) {
|
||||
if (pool != null) {
|
||||
storeMetadataStandalone(metadataIdentifier, v);
|
||||
}else if(sentinelPool != null) {
|
||||
storeMetadataInSentinel(metadataIdentifier, v);
|
||||
} else {
|
||||
storeMetadataInCluster(metadataIdentifier, v);
|
||||
}
|
||||
}
|
||||
|
||||
private void storeMetadataInSentinel(BaseMetadataIdentifier metadataIdentifier, String v) {
|
||||
try (Jedis jedisSentinel = sentinelPool.getResource()) {
|
||||
jedisSentinel.set(metadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY), v, jedisParams);
|
||||
} catch (Throwable e) {
|
||||
String msg =
|
||||
"Failed to put " + metadataIdentifier + " to redis cluster " + v + ", cause: " + e.getMessage();
|
||||
logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
|
||||
throw new RpcException(msg, e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private void storeMetadataInCluster(BaseMetadataIdentifier metadataIdentifier, String v) {
|
||||
try (JedisCluster jedisCluster =
|
||||
new JedisCluster(jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) {
|
||||
|
|
@ -158,11 +185,24 @@ public class RedisMetadataReport extends AbstractMetadataReport {
|
|||
private void deleteMetadata(BaseMetadataIdentifier metadataIdentifier) {
|
||||
if (pool != null) {
|
||||
deleteMetadataStandalone(metadataIdentifier);
|
||||
}else if(sentinelPool != null) {
|
||||
deleteMetadataSentinel(metadataIdentifier);
|
||||
} else {
|
||||
deleteMetadataInCluster(metadataIdentifier);
|
||||
}
|
||||
}
|
||||
|
||||
private void deleteMetadataSentinel(BaseMetadataIdentifier metadataIdentifier) {
|
||||
try (Jedis jedisSentinel = sentinelPool.getResource()) {
|
||||
jedisSentinel.del(metadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY));
|
||||
} catch (Throwable e) {
|
||||
String msg = "Failed to delete " + metadataIdentifier + " from redis , cause: " + e.getMessage();
|
||||
logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
|
||||
throw new RpcException(msg, e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private void deleteMetadataInCluster(BaseMetadataIdentifier metadataIdentifier) {
|
||||
try (JedisCluster jedisCluster =
|
||||
new JedisCluster(jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) {
|
||||
|
|
@ -187,11 +227,24 @@ public class RedisMetadataReport extends AbstractMetadataReport {
|
|||
private String getMetadata(BaseMetadataIdentifier metadataIdentifier) {
|
||||
if (pool != null) {
|
||||
return getMetadataStandalone(metadataIdentifier);
|
||||
}else if(sentinelPool != null) {
|
||||
return getMetadataSentinel(metadataIdentifier);
|
||||
} else {
|
||||
return getMetadataInCluster(metadataIdentifier);
|
||||
}
|
||||
}
|
||||
|
||||
private String getMetadataSentinel(BaseMetadataIdentifier metadataIdentifier) {
|
||||
try (Jedis jedisSentinel = sentinelPool.getResource()) {
|
||||
return jedisSentinel.get(metadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY));
|
||||
} catch (Throwable e) {
|
||||
String msg = "Failed to get " + metadataIdentifier + " from redis , cause: " + e.getMessage();
|
||||
logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
|
||||
throw new RpcException(msg, e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private String getMetadataInCluster(BaseMetadataIdentifier metadataIdentifier) {
|
||||
try (JedisCluster jedisCluster =
|
||||
new JedisCluster(jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) {
|
||||
|
|
@ -243,6 +296,8 @@ public class RedisMetadataReport extends AbstractMetadataReport {
|
|||
private boolean storeMapping(String key, String field, String value, String ticket) {
|
||||
if (pool != null) {
|
||||
return storeMappingStandalone(key, field, value, ticket);
|
||||
}else if(sentinelPool != null) {
|
||||
return storeMappingSentinel(key, field, value, ticket);
|
||||
} else {
|
||||
return storeMappingInCluster(key, field, value, ticket);
|
||||
}
|
||||
|
|
@ -278,6 +333,33 @@ public class RedisMetadataReport extends AbstractMetadataReport {
|
|||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* use 'watch' to implement cas.
|
||||
* Find information about slot distribution by key.
|
||||
*/
|
||||
private boolean storeMappingSentinel(String key, String field, String value, String ticket) {
|
||||
try (Jedis jedisSentinel = sentinelPool.getResource()) {
|
||||
jedisSentinel.watch(key);
|
||||
String oldValue = jedisSentinel.hget(key, field);
|
||||
if (null == oldValue || null == ticket || oldValue.equals(ticket)) {
|
||||
Transaction transaction = jedisSentinel.multi();
|
||||
transaction.hset(key, field, value);
|
||||
List<Object> result = transaction.exec();
|
||||
if (null != result) {
|
||||
jedisSentinel.publish(buildPubSubKey(), field);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
jedisSentinel.unwatch();
|
||||
} catch (Throwable e) {
|
||||
String msg = "Failed to put " + key + ":" + field + " to redis " + value + ", cause: " + e.getMessage();
|
||||
logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
|
||||
throw new RpcException(msg, e);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* use 'watch' to implement cas.
|
||||
* Find information about slot distribution by key.
|
||||
|
|
@ -339,6 +421,8 @@ public class RedisMetadataReport extends AbstractMetadataReport {
|
|||
private String getMappingData(String key, String field) {
|
||||
if (pool != null) {
|
||||
return getMappingDataStandalone(key, field);
|
||||
}else if(sentinelPool != null) {
|
||||
return getMappingDataSentinel(key, field);
|
||||
} else {
|
||||
return getMappingDataInCluster(key, field);
|
||||
}
|
||||
|
|
@ -355,6 +439,17 @@ public class RedisMetadataReport extends AbstractMetadataReport {
|
|||
}
|
||||
}
|
||||
|
||||
private String getMappingDataSentinel(String key, String field) {
|
||||
try (Jedis jedisSentinel = sentinelPool.getResource()) {
|
||||
return jedisSentinel.hget(key, field);
|
||||
} catch (Throwable e) {
|
||||
String msg = "Failed to get " + key + ":" + field + " from redis , cause: " + e.getMessage();
|
||||
logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
|
||||
throw new RpcException(msg, e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private String getMappingDataStandalone(String key, String field) {
|
||||
try (Jedis jedis = pool.getResource()) {
|
||||
return jedis.hget(key, field);
|
||||
|
|
@ -502,6 +597,14 @@ public class RedisMetadataReport extends AbstractMetadataReport {
|
|||
logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
|
||||
throw new RpcException(msg, e);
|
||||
}
|
||||
} else if (sentinelPool != null) {
|
||||
try (Jedis jedisSentinel = sentinelPool.getResource()) {
|
||||
jedisSentinel.subscribe(notifySub, path);
|
||||
} catch (Throwable e) {
|
||||
String msg = "Failed to subscribe " + path + ", cause: " + e.getMessage();
|
||||
logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
|
||||
throw new RpcException(msg, e);
|
||||
}
|
||||
} else {
|
||||
try (JedisCluster jedisCluster = new JedisCluster(
|
||||
jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) {
|
||||
|
|
|
|||
|
|
@ -25,13 +25,25 @@ dubbo:
|
|||
username: dubbo
|
||||
password: ${spring.data.redis.password}
|
||||
# 集群开关
|
||||
cluster: false
|
||||
sentinel: true
|
||||
parameters:
|
||||
namespace: ${spring.profiles.active}
|
||||
database: ${spring.data.redis.database}
|
||||
timeout: ${spring.data.redis.timeout}
|
||||
# 集群地址 cluster 为 true 生效
|
||||
backup: 127.0.0.1:6379,127.0.0.1:6381
|
||||
backup: ${spring.data.redis.sentinel.nodes}
|
||||
# metadata-report:
|
||||
# address: redis://${spring.data.redis.host}:${spring.data.redis.port}
|
||||
# group: DUBBO_GROUP
|
||||
# username: dubbo
|
||||
# password: ${spring.data.redis.password}
|
||||
# # 集群开关
|
||||
# cluster: false
|
||||
# parameters:
|
||||
# namespace: ${spring.profiles.active}
|
||||
# database: ${spring.data.redis.database}
|
||||
# timeout: ${spring.data.redis.timeout}
|
||||
# # 集群地址 cluster 为 true 生效
|
||||
# backup: 127.0.0.1:6379,127.0.0.1:6381
|
||||
# 消费者相关配置
|
||||
consumer:
|
||||
# 结果缓存(LRU算法)
|
||||
|
|
@ -43,3 +55,12 @@ dubbo:
|
|||
retries: 0
|
||||
# 初始化检查
|
||||
check: false
|
||||
|
||||
logging:
|
||||
level:
|
||||
# 设置 Dubbo 核心包的日志级别为 DEBUG
|
||||
org.apache.dubbo: DEBUG
|
||||
# 如果需要更细粒度的调试,可指定元数据报告模块
|
||||
org.apache.dubbo.metadata: DEBUG
|
||||
# Redis 客户端日志(可选)
|
||||
io.lettuce.core: WARN # 避免 Redis 连接日志过多
|
||||
|
|
|
|||
|
|
@ -38,12 +38,12 @@ public enum DataScopeType {
|
|||
/**
|
||||
* 部门数据权限
|
||||
*/
|
||||
DEPT("3", " #{#deptName} = #{#user.deptId} ", " 1 = 0 "),
|
||||
DEPT("3", " #{#deptName} = #{#user.manageDeptId} ", " 1 = 0 "),
|
||||
|
||||
/**
|
||||
* 部门及以下数据权限
|
||||
*/
|
||||
DEPT_AND_CHILD("4", " #{#deptName} IN ( #{@sdss.getDeptAndChild( #user.deptId )} )", " 1 = 0 "),
|
||||
DEPT_AND_CHILD("4", " #{#deptName} IN ( #{@sdss.getDeptAndChild( #user.manageDeptId )} )", " 1 = 0 "),
|
||||
|
||||
/**
|
||||
* 仅本人数据权限
|
||||
|
|
|
|||
|
|
@ -577,23 +577,55 @@ public class RedisUtils {
|
|||
* 模糊查询
|
||||
* */
|
||||
public static List<JSONObject> searchAndGetKeysValues(String pattern) {
|
||||
final int BATCH_SIZE = 1000; // 每批处理1000个key,可根据实际情况调整
|
||||
|
||||
RKeys keys = CLIENT.getKeys();
|
||||
// 模糊查询获取匹配的key
|
||||
Iterable<String> keysIterable = keys.getKeysByPattern(pattern);
|
||||
List<JSONObject> list = new ArrayList<>();
|
||||
// RBatch batch = CLIENT.createBatch();
|
||||
// 批量获取这些key的值
|
||||
for (String key : keysIterable) {
|
||||
RBucket<Object> bucket = CLIENT.getBucket(key);
|
||||
Object value = bucket.get();
|
||||
JSONObject jsonObject = JSONUtil.parseObj(value.toString());
|
||||
list.add(jsonObject);
|
||||
|
||||
// 收集所有匹配的 key 到 list(SCAN 是惰性的,必须遍历完才能拿到全部)
|
||||
List<String> allKeys = new ArrayList<>();
|
||||
keysIterable.forEach(allKeys::add);
|
||||
|
||||
if (allKeys.isEmpty()) {
|
||||
return Collections.emptyList();
|
||||
}
|
||||
|
||||
// 执行批量获取
|
||||
// BatchResult result = batch.execute();
|
||||
// 输出结果
|
||||
return list;
|
||||
List<JSONObject> result = new ArrayList<>(allKeys.size());
|
||||
|
||||
// 分批处理
|
||||
for (int i = 0; i < allKeys.size(); i += BATCH_SIZE) {
|
||||
int end = Math.min(i + BATCH_SIZE, allKeys.size());
|
||||
List<String> batchKeys = allKeys.subList(i, end);
|
||||
|
||||
RBatch batch = CLIENT.createBatch();
|
||||
Map<String, RFuture<Object>> futureMap = new HashMap<>();
|
||||
|
||||
// 添加异步 get 请求
|
||||
for (String key : batchKeys) {
|
||||
RFuture<Object> future = batch.getBucket(key).getAsync();
|
||||
futureMap.put(key, future);
|
||||
}
|
||||
|
||||
// 执行批量命令(一次网络往返)
|
||||
batch.execute();
|
||||
|
||||
// 收集结果
|
||||
for (String key : batchKeys) {
|
||||
try {
|
||||
Object value = futureMap.get(key).getNow(); // 已执行完毕,无需阻塞
|
||||
if (value != null) {
|
||||
JSONObject jsonObject = JSONUtil.parseObj(value.toString());
|
||||
result.add(jsonObject);
|
||||
}
|
||||
// 可选:记录缺失 key(用于 debug)
|
||||
} catch (Exception e) {
|
||||
// 日志记录异常,避免一个 key 失败导致整体失败
|
||||
// log.warn("Failed to get value for key: {}", key, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -602,6 +634,9 @@ public class RedisUtils {
|
|||
public static JSONObject getBucket(String key){
|
||||
RBucket<Object> bucket = CLIENT.getBucket(key);
|
||||
Object value = bucket.get();
|
||||
if (null == value){
|
||||
return null;
|
||||
}
|
||||
return JSONUtil.parseObj(value.toString());
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -35,6 +35,8 @@ public class LoginHelper {
|
|||
public static final String USER_KEY = "userId";
|
||||
public static final String USER_NAME_KEY = "userName";
|
||||
public static final String DEPT_KEY = "deptId";
|
||||
|
||||
public static final String MANAGE_DEPT__KEY = "manageDeptId";
|
||||
public static final String DEPT_NAME_KEY = "deptName";
|
||||
public static final String DEPT_CATEGORY_KEY = "deptCategory";
|
||||
public static final String CLIENT_KEY = "clientid";
|
||||
|
|
@ -53,6 +55,7 @@ public class LoginHelper {
|
|||
.setExtra(USER_KEY, loginUser.getUserId())
|
||||
.setExtra(USER_NAME_KEY, loginUser.getUsername())
|
||||
.setExtra(DEPT_KEY, loginUser.getDeptId())
|
||||
.setExtra(MANAGE_DEPT__KEY,loginUser.getManageDeptId())
|
||||
.setExtra(DEPT_NAME_KEY, loginUser.getDeptName())
|
||||
.setExtra(DEPT_CATEGORY_KEY, loginUser.getDeptCategory())
|
||||
);
|
||||
|
|
|
|||
|
|
@ -1,114 +1,49 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<configuration scan="true" scanPeriod="60 seconds" debug="false">
|
||||
<!-- 日志存放路径 -->
|
||||
<property name="log.path" value="logs/${project.artifactId}"/>
|
||||
<property name="log.path" value="logs" />
|
||||
<property name="log.file" value="gateway" />
|
||||
<property name="MAX_FILE_SIZE" value="50MB" />
|
||||
<property name="MAX_HISTORY" value="30" />
|
||||
<!-- 日志输出格式 -->
|
||||
<property name="console.log.pattern"
|
||||
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
|
||||
<property name="log.pattern" value="%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{36} - %msg%n"/>
|
||||
|
||||
<!-- 控制台输出 -->
|
||||
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<!-- INFO日志Appender -->
|
||||
<appender name="FILE_INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.path}/info.${log.file}.log</file>
|
||||
<filter class="ch.qos.logback.classic.filter.LevelFilter">
|
||||
<level>INFO</level>
|
||||
<onMatch>ACCEPT</onMatch>
|
||||
<onMismatch>DENY</onMismatch>
|
||||
</filter>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.path}/info/info.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
|
||||
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
|
||||
<maxHistory>${MAX_HISTORY}</maxHistory>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>${console.log.pattern}</pattern>
|
||||
<charset>utf-8</charset>
|
||||
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<!-- 控制台输出 -->
|
||||
<appender name="file_console" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.path}/console.log</file>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
|
||||
<!-- 日志文件名格式 -->
|
||||
<fileNamePattern>${log.path}/console.%d{yyyy-MM-dd}.log</fileNamePattern>
|
||||
<!-- 日志最大 1天 -->
|
||||
<maxHistory>1</maxHistory>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>${log.pattern}</pattern>
|
||||
<charset>utf-8</charset>
|
||||
</encoder>
|
||||
<!-- ERROR日志Appender -->
|
||||
<appender name="FILE_ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.path}/error.${log.file}.log</file>
|
||||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||
<!-- 过滤的级别 -->
|
||||
<level>INFO</level>
|
||||
</filter>
|
||||
</appender>
|
||||
|
||||
<!-- 系统日志输出 -->
|
||||
<appender name="file_info" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.path}/info.log</file>
|
||||
<!-- 循环政策:基于时间创建日志文件 -->
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
|
||||
<!-- 日志文件名格式 -->
|
||||
<fileNamePattern>${log.path}/info.%d{yyyy-MM-dd}.log</fileNamePattern>
|
||||
<!-- 日志最大的历史 60天 -->
|
||||
<maxHistory>60</maxHistory>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>${log.pattern}</pattern>
|
||||
</encoder>
|
||||
<filter class="ch.qos.logback.classic.filter.LevelFilter">
|
||||
<!-- 过滤的级别 -->
|
||||
<level>INFO</level>
|
||||
<!-- 匹配时的操作:接收(记录) -->
|
||||
<onMatch>ACCEPT</onMatch>
|
||||
<!-- 不匹配时的操作:拒绝(不记录) -->
|
||||
<onMismatch>DENY</onMismatch>
|
||||
</filter>
|
||||
</appender>
|
||||
|
||||
<appender name="file_error" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.path}/error.log</file>
|
||||
<!-- 循环政策:基于时间创建日志文件 -->
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
|
||||
<!-- 日志文件名格式 -->
|
||||
<fileNamePattern>${log.path}/error.%d{yyyy-MM-dd}.log</fileNamePattern>
|
||||
<!-- 日志最大的历史 60天 -->
|
||||
<maxHistory>60</maxHistory>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>${log.pattern}</pattern>
|
||||
</encoder>
|
||||
<filter class="ch.qos.logback.classic.filter.LevelFilter">
|
||||
<!-- 过滤的级别 -->
|
||||
<level>ERROR</level>
|
||||
<!-- 匹配时的操作:接收(记录) -->
|
||||
<onMatch>ACCEPT</onMatch>
|
||||
<!-- 不匹配时的操作:拒绝(不记录) -->
|
||||
<onMismatch>DENY</onMismatch>
|
||||
</filter>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.path}/error/error.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
|
||||
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
|
||||
<maxHistory>${MAX_HISTORY}</maxHistory>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<!-- info异步输出 -->
|
||||
<appender name="async_info" class="ch.qos.logback.classic.AsyncAppender">
|
||||
<!-- 不丢失日志.默认的,如果队列的80%已满,则会丢弃TRACT、DEBUG、INFO级别的日志 -->
|
||||
<discardingThreshold>0</discardingThreshold>
|
||||
<!-- 更改默认的队列的深度,该值会影响性能.默认值为256 -->
|
||||
<queueSize>512</queueSize>
|
||||
<!-- 添加附加的appender,最多只能添加一个 -->
|
||||
<appender-ref ref="file_info"/>
|
||||
</appender>
|
||||
|
||||
<!-- error异步输出 -->
|
||||
<appender name="async_error" class="ch.qos.logback.classic.AsyncAppender">
|
||||
<!-- 不丢失日志.默认的,如果队列的80%已满,则会丢弃TRACT、DEBUG、INFO级别的日志 -->
|
||||
<discardingThreshold>0</discardingThreshold>
|
||||
<!-- 更改默认的队列的深度,该值会影响性能.默认值为256 -->
|
||||
<queueSize>512</queueSize>
|
||||
<!-- 添加附加的appender,最多只能添加一个 -->
|
||||
<appender-ref ref="file_error"/>
|
||||
</appender>
|
||||
|
||||
<include resource="logback-logstash.xml" />
|
||||
|
||||
<!-- 开启 skywalking 日志收集 -->
|
||||
<include resource="logback-skylog.xml" />
|
||||
|
||||
<!--系统操作日志-->
|
||||
<root level="info">
|
||||
<appender-ref ref="console"/>
|
||||
<appender-ref ref="async_info"/>
|
||||
<appender-ref ref="async_error"/>
|
||||
<appender-ref ref="file_console"/>
|
||||
<!-- 根Logger配置(禁用控制台输出) -->
|
||||
<root level="INFO">
|
||||
<appender-ref ref="FILE_INFO" />
|
||||
<appender-ref ref="FILE_ERROR" />
|
||||
</root>
|
||||
|
||||
</configuration>
|
||||
|
|
|
|||
|
|
@ -16,6 +16,11 @@
|
|||
<module>stwzhj-workflow</module>
|
||||
<module>stwzhj-data2es</module>
|
||||
<module>stwzhj-baseToSt</module>
|
||||
<module>stwzhj-data2StKafka</module>
|
||||
<module>stwzhj-extract</module>
|
||||
<module>stwzhj-data2gas</module>
|
||||
<module>stwzhj-kafka-consumer</module>
|
||||
<module>stwzhj-kafka-producer</module>
|
||||
</modules>
|
||||
|
||||
<artifactId>stwzhj-modules</artifactId>
|
||||
|
|
|
|||
|
|
@ -0,0 +1,131 @@
|
|||
|
||||
# 设备数据同步功能
|
||||
|
||||
## 功能介绍
|
||||
|
||||
本模块实现了从本地MySQL数据库定时抽取设备数据到目标PostgreSQL数据库的功能。每个地市部署一个程序实例,从本地MySQL数据库抽取数据到统一的目标PostgreSQL数据库。
|
||||
|
||||
## 主要特性
|
||||
|
||||
1. **轻量化设计**:移除了nacos等微服务相关依赖,只保留了必要的功能
|
||||
2. **增量同步**:根据目标表中的info_source查询该地市最新更新的数据,然后从源表中抽取
|
||||
3. **设备类型映射**:通过sys_dict_data字典表进行设备类型转换
|
||||
4. **批量处理**:支持批量插入和更新,提高同步效率
|
||||
5. **可配置**:所有配置通过application.yml文件进行配置
|
||||
|
||||
## 部署架构
|
||||
|
||||
每个地市部署一个程序实例,从本地MySQL数据库抽取数据到统一的目标PostgreSQL数据库:
|
||||
|
||||
```
|
||||
地市A实例 -> 本地MySQL A -> 目标PostgreSQL
|
||||
地市B实例 -> 本地MySQL B -> 目标PostgreSQL
|
||||
地市C实例 -> 本地MySQL C -> 目标PostgreSQL
|
||||
```
|
||||
|
||||
## 配置说明
|
||||
|
||||
### 数据源配置
|
||||
|
||||
在application.yml中配置源数据库和目标数据库:
|
||||
|
||||
```yaml
|
||||
spring:
|
||||
datasource:
|
||||
dynamic:
|
||||
primary: target #设置默认数据源为目标数据库
|
||||
strict: false #严格匹配数据源
|
||||
datasource:
|
||||
# 源数据库(MySQL) - 每个地市配置不同的源数据库
|
||||
source:
|
||||
url: jdbc:mysql://localhost:3306/wzhj_hs
|
||||
username: root
|
||||
password: root
|
||||
driver-class-name: com.mysql.cj.jdbc.Driver
|
||||
# 目标数据库(PostgreSQL) - 所有地市使用相同的目标数据库
|
||||
target:
|
||||
url: jdbc:postgresql://localhost:5432/your_database
|
||||
username: postgres
|
||||
password: postgres
|
||||
driver-class-name: org.postgresql.Driver
|
||||
```
|
||||
|
||||
### 设备同步配置
|
||||
|
||||
```yaml
|
||||
device-sync:
|
||||
# 信息来源标识 - 每个地市配置不同的info_source
|
||||
info-source: "3418"
|
||||
# 源表结构类型 - v1或v2
|
||||
# v1: 第一种表结构,字段使用下划线命名(如police_no)
|
||||
# v2: 第二种表结构,部分字段使用驼峰命名(如policeNo)
|
||||
source-table-type: v1
|
||||
# 设备类型映射字典类型
|
||||
dict-type: device_type_tost
|
||||
# 批量插入大小
|
||||
batch-size: 50
|
||||
# 定时任务cron表达式
|
||||
cron: 0 0/10 * * * ?
|
||||
```
|
||||
|
||||
### 源表结构类型说明
|
||||
|
||||
本系统支持两种不同的源表结构:
|
||||
|
||||
1. **v1类型**:第一种表结构
|
||||
- 字段使用下划线命名,如police_no、police_name、phone_num等
|
||||
- 包含所有字段,包括录入单位、修改单位、设备品牌、设备型号等详细信息
|
||||
|
||||
2. **v2类型**:第二种表结构
|
||||
- 部分字段使用驼峰命名,如policeNo、policeName、phoneNum等
|
||||
- 只包含核心字段,不包含录入单位、修改单位、设备品牌、设备型号等详细信息
|
||||
|
||||
根据源数据库的表结构,在配置文件中设置相应的source-table-type值。
|
||||
|
||||
## 数据同步逻辑
|
||||
|
||||
1. 根据目标表中的info_source查询该地市最新更新的时间
|
||||
2. 从本地MySQL数据库查询更新时间大于该时间的所有设备数据
|
||||
3. 通过sys_dict_data字典表进行设备类型映射
|
||||
4. 根据device_code和info_source判断设备是否存在,存在则更新,不存在则新增
|
||||
5. 批量处理数据,提高同步效率
|
||||
|
||||
## 设备类型映射
|
||||
|
||||
设备类型映射通过sys_dict_data字典表实现,配置规则如下:
|
||||
|
||||
- dict_type: device_type_tost
|
||||
- dict_value: 源表的device_type值
|
||||
- dict_label: 目标表的device_type值
|
||||
|
||||
示例:
|
||||
| dict_type | dict_value | dict_label |
|
||||
|-----------|------------|------------|
|
||||
| device_type_tost | 1 | 1 |
|
||||
| device_type_tost | 2 | 2 |
|
||||
| device_type_tost | 北斗 | 1 |
|
||||
| device_type_tost | 车载 | 2 |
|
||||
|
||||
## 定时任务
|
||||
|
||||
系统默认配置了两个定时任务:
|
||||
|
||||
1. sendToSt:设备数据同步到省厅(原有功能)
|
||||
2. syncDevicesFromSource:从源数据库同步设备数据(新增功能)
|
||||
|
||||
定时任务的执行时间可以通过application.yml中的cron表达式进行配置。
|
||||
|
||||
## 使用说明
|
||||
|
||||
1. 修改application.yml配置文件,配置本地MySQL数据库和目标PostgreSQL数据库的连接信息
|
||||
2. 配置该地市的info_source标识
|
||||
3. 配置设备类型映射字典数据
|
||||
4. 启动应用,系统将自动执行定时同步任务
|
||||
|
||||
## 注意事项
|
||||
|
||||
1. 确保源数据库和目标数据库的t_device表结构正确
|
||||
2. 确保sys_dict_data表中配置了正确的设备类型映射
|
||||
3. 首次同步时,如果目标表中无数据,将同步所有源数据
|
||||
4. 建议在非高峰期执行同步任务,避免影响业务系统性能
|
||||
5. 不同地市的info_source必须唯一,否则会导致数据冲突
|
||||
|
|
@ -16,35 +16,33 @@
|
|||
</description>
|
||||
|
||||
<dependencies>
|
||||
<!-- PostgreSQL Driver -->
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-common-nacos</artifactId>
|
||||
<groupId>org.postgresql</groupId>
|
||||
<artifactId>postgresql</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- 动态数据源 -->
|
||||
<dependency>
|
||||
<groupId>com.baomidou</groupId>
|
||||
<artifactId>dynamic-datasource-spring-boot3-starter</artifactId>
|
||||
<version>${dynamic-ds.version}</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-common-sentinel</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- RuoYi Common Log -->
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-common-log</artifactId>
|
||||
<groupId>org.projectlombok</groupId>
|
||||
<artifactId>lombok</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-common-dict</artifactId>
|
||||
<groupId>cn.hutool</groupId>
|
||||
<artifactId>hutool-all</artifactId>
|
||||
<version>5.4.0</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-common-doc</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-common-web</artifactId>
|
||||
<groupId>ch.qos.logback</groupId>
|
||||
<artifactId>logback-classic</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
|
|
@ -52,50 +50,10 @@
|
|||
<artifactId>mysql-connector-j</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- Spring Boot Starter Web -->
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-common-dubbo</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-common-seata</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-common-idempotent</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-common-tenant</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-common-security</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-common-translation</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-common-sensitive</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-common-encrypt</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- RuoYi Api System -->
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-api-system</artifactId>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-starter-web</artifactId>
|
||||
</dependency>
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -2,20 +2,17 @@ package org.dromara.basetost;
|
|||
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||
import org.springframework.cloud.client.discovery.EnableDiscoveryClient;
|
||||
|
||||
import org.springframework.data.jpa.repository.config.EnableJpaAuditing;
|
||||
import org.springframework.scheduling.annotation.EnableScheduling;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
* 设备数据同步应用
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2023-05-20 12:01
|
||||
*/
|
||||
@SpringBootApplication
|
||||
@EnableJpaAuditing
|
||||
@EnableDiscoveryClient
|
||||
@EnableScheduling
|
||||
public class BaseToSTApplication {
|
||||
public static void main(String[] args) {
|
||||
|
|
|
|||
|
|
@ -0,0 +1,37 @@
|
|||
|
||||
package org.dromara.basetost.config;
|
||||
|
||||
import lombok.Data;
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
/**
|
||||
* 设备数据同步配置
|
||||
*/
|
||||
@Data
|
||||
@Component
|
||||
@ConfigurationProperties(prefix = "device-sync")
|
||||
public class DeviceSyncConfig {
|
||||
|
||||
/**
|
||||
* 信息来源标识 - 每个地市配置不同的info_source
|
||||
*/
|
||||
private String infoSource = "3416";
|
||||
|
||||
/**
|
||||
* 设备类型映射字典类型
|
||||
*/
|
||||
private String dictType = "device_type_tost";
|
||||
|
||||
/**
|
||||
* 批量插入大小
|
||||
*/
|
||||
private int batchSize = 50;
|
||||
|
||||
/**
|
||||
* 源表结构类型 - v1或v2
|
||||
* v1: 第一种表结构,字段使用下划线命名(如police_no)
|
||||
* v2: 第二种表结构,部分字段使用驼峰命名(如policeNo)
|
||||
*/
|
||||
private String sourceTableType = "v1";
|
||||
}
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
|
||||
package org.dromara.basetost.config;
|
||||
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.data.jpa.repository.config.EnableJpaRepositories;
|
||||
|
||||
/**
|
||||
* JPA配置类
|
||||
*/
|
||||
@Configuration
|
||||
@EnableJpaRepositories(basePackages = "org.dromara.basetost.repository")
|
||||
public class JpaConfig {
|
||||
// JPA配置,启用JPA仓库
|
||||
}
|
||||
|
|
@ -26,14 +26,7 @@ public class Device implements AbstractGpsEntity, Serializable {
|
|||
@Column(name = "device_code")
|
||||
private String deviceCode;
|
||||
|
||||
/**
|
||||
* <option value="1">北斗有源手持</option>
|
||||
* <option value="2">车载</option>
|
||||
* <option value="3">PDT</option>
|
||||
* <option value="4">警务通</option>
|
||||
* <option value="5">执法记录仪</option>
|
||||
* <option value="6">其他</option>
|
||||
*/
|
||||
|
||||
@Column(name = "device_type")
|
||||
private String deviceType;
|
||||
|
||||
|
|
@ -52,19 +45,19 @@ public class Device implements AbstractGpsEntity, Serializable {
|
|||
/**
|
||||
* 警号(若有)
|
||||
*/
|
||||
@Column(name = "policeNo")
|
||||
@Column(name = "police_no")
|
||||
private String policeNo;
|
||||
|
||||
/**
|
||||
* 姓名(若有)
|
||||
*/
|
||||
@Column(name = "policeName")
|
||||
@Column(name = "police_name")
|
||||
private String policeName;
|
||||
|
||||
/**
|
||||
* 联系电话(若有)
|
||||
*/
|
||||
@Column(name = "phoneNum")
|
||||
@Column(name = "phone_num")
|
||||
private String phoneNum;
|
||||
|
||||
/**
|
||||
|
|
@ -105,4 +98,88 @@ public class Device implements AbstractGpsEntity, Serializable {
|
|||
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss",timezone = "GMT+8")
|
||||
private Date updateTime;
|
||||
|
||||
/**
|
||||
* 地市
|
||||
*/
|
||||
@Column(name = "info_source")
|
||||
private String infoSource;
|
||||
|
||||
/**
|
||||
* 录入单位代码
|
||||
*/
|
||||
@Column(name = "lrdwdm")
|
||||
private String lrdwdm;
|
||||
|
||||
/**
|
||||
* 录入单位名称
|
||||
*/
|
||||
@Column(name = "lrdwmc")
|
||||
private String lrdwmc;
|
||||
|
||||
/**
|
||||
* 录入人姓名
|
||||
*/
|
||||
@Column(name = "lrrxm")
|
||||
private String lrrxm;
|
||||
|
||||
/**
|
||||
* 录入人身份证
|
||||
*/
|
||||
@Column(name = "lrrsfzh")
|
||||
private String lrrsfzh;
|
||||
|
||||
/**
|
||||
* 修改单位代码
|
||||
*/
|
||||
@Column(name = "xgdwdm")
|
||||
private String xgdwdm;
|
||||
|
||||
/**
|
||||
* 修改单位名称
|
||||
*/
|
||||
@Column(name = "xgdwmc")
|
||||
private String xgdwmc;
|
||||
|
||||
/**
|
||||
* 修改人姓名
|
||||
*/
|
||||
@Column(name = "xgrxm")
|
||||
private String xgrxm;
|
||||
|
||||
/**
|
||||
* 修改人身份证
|
||||
*/
|
||||
@Column(name = "xgrsfzh")
|
||||
private String xgrsfzh;
|
||||
|
||||
/**
|
||||
* 设备品牌
|
||||
*/
|
||||
@Column(name = "sbpp")
|
||||
private String sbpp;
|
||||
|
||||
/**
|
||||
* 设备型号
|
||||
*/
|
||||
@Column(name = "sbxh")
|
||||
private String sbxh;
|
||||
|
||||
/**
|
||||
* 警员身份证号
|
||||
*/
|
||||
@Column(name = "card_num")
|
||||
private String cardNum;
|
||||
|
||||
/**
|
||||
* 通道编码
|
||||
*/
|
||||
@Column(name = "tdbm")
|
||||
private String tdbm;
|
||||
|
||||
/**
|
||||
* 国标编码
|
||||
*/
|
||||
@Column(name = "gbbm")
|
||||
private String gbbm;
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -49,54 +49,8 @@ public class DictData implements Serializable {
|
|||
@Column(name = "dict_type")
|
||||
private String dictType;
|
||||
|
||||
/**
|
||||
* 样式属性(其他样式扩展)
|
||||
*/
|
||||
@Column(name = "css_class")
|
||||
private String cssClass;
|
||||
|
||||
/**
|
||||
* 表格回显样式
|
||||
*/
|
||||
@Column(name = "list_class")
|
||||
private String listClass;
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* 状态(0正常 1停用)
|
||||
*/
|
||||
@Column(name = "status")
|
||||
private String status;
|
||||
|
||||
/**
|
||||
* 创建者
|
||||
*/
|
||||
@Column(name = "create_by")
|
||||
private String createBy;
|
||||
|
||||
/**
|
||||
* 创建时间
|
||||
*/
|
||||
@Column(name = "create_time")
|
||||
private Date createTime;
|
||||
|
||||
/**
|
||||
* 更新者
|
||||
*/
|
||||
@Column(name = "update_by")
|
||||
private String updateBy;
|
||||
|
||||
/**
|
||||
* 更新时间
|
||||
*/
|
||||
@Column(name = "update_time")
|
||||
private Date updateTime;
|
||||
|
||||
/**
|
||||
* 备注
|
||||
*/
|
||||
@Column(name = "remark")
|
||||
private String remark;
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,186 @@
|
|||
|
||||
package org.dromara.basetost.entity;
|
||||
|
||||
import jakarta.persistence.*;
|
||||
import lombok.Data;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Date;
|
||||
|
||||
/**
|
||||
* 源设备实体类
|
||||
*/
|
||||
@Data
|
||||
@Entity
|
||||
@Table(name = "t_device")
|
||||
public class SourceDevice implements Serializable {
|
||||
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
@Id
|
||||
@Column(name = "id", nullable = false)
|
||||
@GeneratedValue(strategy = GenerationType.IDENTITY)
|
||||
private Integer id;
|
||||
|
||||
/**
|
||||
* 外部系统设备编号建议21位
|
||||
*/
|
||||
@Column(name = "device_code")
|
||||
private String deviceCode;
|
||||
|
||||
/**
|
||||
* 设备类型
|
||||
*/
|
||||
@Column(name = "device_type")
|
||||
private String deviceType;
|
||||
|
||||
/**
|
||||
* 组织机构代码
|
||||
*/
|
||||
@Column(name = "zzjgdm")
|
||||
private String zzjgdm;
|
||||
|
||||
/**
|
||||
* 组织机构名称
|
||||
*/
|
||||
@Column(name = "zzjgmc")
|
||||
private String zzjgmc;
|
||||
|
||||
/**
|
||||
* 警号(若有)
|
||||
*/
|
||||
@Column(name = "police_no")
|
||||
private String policeNo;
|
||||
|
||||
/**
|
||||
* 姓名(若有)
|
||||
*/
|
||||
@Column(name = "police_name")
|
||||
private String policeName;
|
||||
|
||||
/**
|
||||
* 联系电话(若有)
|
||||
*/
|
||||
@Column(name = "phone_num")
|
||||
private String phoneNum;
|
||||
|
||||
/**
|
||||
* 车牌号(若有)
|
||||
*/
|
||||
@Column(name = "car_num")
|
||||
private String carNum;
|
||||
|
||||
/**
|
||||
* 0无效,1有效
|
||||
*/
|
||||
@Column(name = "valid")
|
||||
private Integer valid;
|
||||
|
||||
/**
|
||||
* 备注字段1
|
||||
*/
|
||||
@Column(name = "remark1")
|
||||
private String remark1;
|
||||
|
||||
/**
|
||||
* 备注字段2
|
||||
*/
|
||||
@Column(name = "remark2")
|
||||
private String remark2;
|
||||
|
||||
/**
|
||||
* 创建时间
|
||||
*/
|
||||
@Column(name = "create_time")
|
||||
private Date createTime;
|
||||
|
||||
/**
|
||||
* 最后更新时间
|
||||
*/
|
||||
@Column(name = "update_time")
|
||||
private Date updateTime;
|
||||
|
||||
/**
|
||||
* 地市
|
||||
*/
|
||||
@Column(name = "info_source")
|
||||
private String infoSource;
|
||||
|
||||
/**
|
||||
* 录入单位代码
|
||||
*/
|
||||
@Column(name = "lrdwdm")
|
||||
private String lrdwdm;
|
||||
|
||||
/**
|
||||
* 录入单位名称
|
||||
*/
|
||||
@Column(name = "lrdwmc")
|
||||
private String lrdwmc;
|
||||
|
||||
/**
|
||||
* 录入人姓名
|
||||
*/
|
||||
@Column(name = "lrrxm")
|
||||
private String lrrxm;
|
||||
|
||||
/**
|
||||
* 录入人身份证
|
||||
*/
|
||||
@Column(name = "lrrsfzh")
|
||||
private String lrrsfzh;
|
||||
|
||||
/**
|
||||
* 修改单位代码
|
||||
*/
|
||||
@Column(name = "xgdwdm")
|
||||
private String xgdwdm;
|
||||
|
||||
/**
|
||||
* 修改单位名称
|
||||
*/
|
||||
@Column(name = "xgdwmc")
|
||||
private String xgdwmc;
|
||||
|
||||
/**
|
||||
* 修改人姓名
|
||||
*/
|
||||
@Column(name = "xgrxm")
|
||||
private String xgrxm;
|
||||
|
||||
/**
|
||||
* 修改人身份证
|
||||
*/
|
||||
@Column(name = "xgrsfzh")
|
||||
private String xgrsfzh;
|
||||
|
||||
/**
|
||||
* 设备品牌
|
||||
*/
|
||||
@Column(name = "sbpp")
|
||||
private String sbpp;
|
||||
|
||||
/**
|
||||
* 设备型号
|
||||
*/
|
||||
@Column(name = "sbxh")
|
||||
private String sbxh;
|
||||
|
||||
/**
|
||||
* 警员身份证号
|
||||
*/
|
||||
@Column(name = "card_num")
|
||||
private String cardNum;
|
||||
|
||||
/**
|
||||
* 通道编码
|
||||
*/
|
||||
@Column(name = "tdbm")
|
||||
private String tdbm;
|
||||
|
||||
/**
|
||||
* 国标编码
|
||||
*/
|
||||
@Column(name = "gbbm")
|
||||
private String gbbm;
|
||||
}
|
||||
|
|
@ -0,0 +1,110 @@
|
|||
|
||||
package org.dromara.basetost.entity;
|
||||
|
||||
import jakarta.persistence.*;
|
||||
import lombok.Data;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Date;
|
||||
|
||||
/**
|
||||
* 源设备实体类V2 - 第二种表结构
|
||||
*/
|
||||
@Data
|
||||
@Entity
|
||||
@Table(name = "t_device")
|
||||
public class SourceDeviceV2 implements Serializable {
|
||||
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
@Id
|
||||
@Column(name = "id", nullable = false)
|
||||
@GeneratedValue(strategy = GenerationType.IDENTITY)
|
||||
private Integer id;
|
||||
|
||||
/**
|
||||
* 外部系统设备编号建议21位
|
||||
*/
|
||||
@Column(name = "device_code")
|
||||
private String deviceCode;
|
||||
|
||||
/**
|
||||
* 设备类型
|
||||
*/
|
||||
@Column(name = "device_type")
|
||||
private String deviceType;
|
||||
|
||||
/**
|
||||
* 组织机构代码
|
||||
*/
|
||||
@Column(name = "zzjgdm")
|
||||
private String zzjgdm;
|
||||
|
||||
/**
|
||||
* 组织机构名称
|
||||
*/
|
||||
@Column(name = "zzjgmc")
|
||||
private String zzjgmc;
|
||||
|
||||
/**
|
||||
* 警号(若有)- 使用驼峰命名
|
||||
*/
|
||||
@Column(name = "policeNo")
|
||||
private String policeNo;
|
||||
|
||||
/**
|
||||
* 姓名(若有)- 使用驼峰命名
|
||||
*/
|
||||
@Column(name = "policeName")
|
||||
private String policeName;
|
||||
|
||||
/**
|
||||
* 联系电话(若有)- 使用驼峰命名
|
||||
*/
|
||||
@Column(name = "phoneNum")
|
||||
private String phoneNum;
|
||||
|
||||
/**
|
||||
* 车牌号(若有)
|
||||
*/
|
||||
@Column(name = "car_num")
|
||||
private String carNum;
|
||||
|
||||
/**
|
||||
* 0无效,1有效
|
||||
*/
|
||||
@Column(name = "valid")
|
||||
private Integer valid;
|
||||
|
||||
/**
|
||||
* 备注字段1
|
||||
*/
|
||||
@Column(name = "remark1")
|
||||
private String remark1;
|
||||
|
||||
/**
|
||||
* 备注字段2
|
||||
*/
|
||||
@Column(name = "remark2")
|
||||
private String remark2;
|
||||
|
||||
/**
|
||||
* 创建时间
|
||||
*/
|
||||
@Column(name = "create_time")
|
||||
private Date createTime;
|
||||
|
||||
/**
|
||||
* 最后更新时间
|
||||
*/
|
||||
@Column(name = "update_time")
|
||||
private Date updateTime;
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* 警员身份证号 - 使用驼峰命名
|
||||
*/
|
||||
@Column(name = "card_num")
|
||||
private String cardNum;
|
||||
}
|
||||
|
|
@ -1,129 +0,0 @@
|
|||
package org.dromara.basetost.handler;
|
||||
|
||||
import cn.hutool.core.bean.BeanUtil;
|
||||
import cn.hutool.core.date.DateUtil;
|
||||
import org.dromara.basetost.entity.AbstractGpsEntity;
|
||||
import org.dromara.basetost.entity.Device;
|
||||
import org.dromara.basetost.entity.DictData;
|
||||
import org.dromara.basetost.repository.DictDataRepository;
|
||||
import org.dromara.common.core.utils.StringUtils;
|
||||
import org.dromara.system.api.domain.bo.RemoteDeviceBo;
|
||||
import org.dromara.system.api.domain.bo.RemoteDeviceToStBo;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Component;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2022-08-05 16:48
|
||||
*/
|
||||
@Component
|
||||
public abstract class AbstractAsyncHandler {
|
||||
|
||||
private Logger logger = LoggerFactory.getLogger(AsyncHandler.class);
|
||||
@Autowired
|
||||
AsyncHandler asyncHandler;
|
||||
|
||||
@Autowired
|
||||
DictDataRepository dictDataRepository;
|
||||
|
||||
|
||||
|
||||
private String lastUpdateTime;
|
||||
|
||||
|
||||
public boolean saveBaseToST(){
|
||||
|
||||
|
||||
try {
|
||||
// lastUpdateTime = PathUtil.getValueFromProperties("lastUpdateTime");
|
||||
lastUpdateTime = "2024-11-23 11:20:32";
|
||||
}catch (Exception e){
|
||||
logger.info("lastUpdateTime={},lastUpdateTimeError={}",lastUpdateTime,e.getMessage());
|
||||
return false;
|
||||
}
|
||||
|
||||
List<Device> data = getGpsInfoByTime(lastUpdateTime);
|
||||
logger.info("dataSize={}",data.size());
|
||||
if(CollectionUtils.isEmpty(data)){
|
||||
logger.info("此时无数据={}",lastUpdateTime);
|
||||
return true;
|
||||
}
|
||||
requestToData2es(data);
|
||||
return true;
|
||||
}
|
||||
|
||||
private void requestToData2es(List<Device> data){
|
||||
String infoSource = "3418";
|
||||
// bo.setInfoSource(PathUtil.getValueFromProperties("infoSource"));
|
||||
boolean b = true;
|
||||
int size = data.size();
|
||||
for (AbstractGpsEntity datum : data) {
|
||||
Device device = (Device) datum;
|
||||
DictData dictData = dictDataRepository.findDictDataByDictTypeAndDictLabel("device_type_tost", String.valueOf(device.getDeviceType()));
|
||||
String dictValue = null;
|
||||
if(!Objects.isNull(dictData)){
|
||||
dictValue = dictData.getDictValue();
|
||||
}
|
||||
if(StringUtils.isEmpty(dictValue)){
|
||||
dictValue = "99";
|
||||
}
|
||||
|
||||
device.setDeviceType(dictValue);
|
||||
}
|
||||
int forCount = size / 50;
|
||||
for (int i = 0; i <= forCount; i++) {
|
||||
List<Device> singleList = new ArrayList<>();
|
||||
|
||||
int fromIndex = i * 50;
|
||||
int endIndex = (i == forCount ? size: (i+1)*50);
|
||||
singleList = data.subList(fromIndex,endIndex);
|
||||
// BeanUtil.copyToList(singleList, RemoteDeviceBo.class)
|
||||
List<RemoteDeviceBo> list = new ArrayList<>();
|
||||
|
||||
// bo.setDeviceBoList();
|
||||
boolean singleB = asyncHandler.saveGpsAsync(infoSource,BeanUtil.copyToList(singleList, RemoteDeviceBo.class));
|
||||
if(!singleB){
|
||||
b = false;
|
||||
}
|
||||
}
|
||||
if(b){
|
||||
AbstractGpsEntity abstractGpsEntity = data.get(size - 1);
|
||||
Device lastDevice = (Device) abstractGpsEntity;
|
||||
resetUpdateTime(lastDevice.getUpdateTime());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
private void resetUpdateTime(Date gpsTime) {
|
||||
try {
|
||||
lastUpdateTime = DateUtil.format(gpsTime,"yyyy-MM-dd HH:mm:ss");
|
||||
// PathUtil.updateProperties("lastUpdateTime",lastUpdateTime,"ruansi.properties");
|
||||
}catch (Exception e){
|
||||
logger.info("lastTime reset error"+e.getMessage());
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
protected Device produceGpsInfo(AbstractGpsEntity gpsInfo){
|
||||
return null;
|
||||
};
|
||||
|
||||
protected boolean checkNullOrEmpty(AbstractGpsEntity gpsInfo) {
|
||||
return false;
|
||||
};
|
||||
|
||||
protected abstract List<Device> getGpsInfoByTime(String lastUpdateTime);
|
||||
}
|
||||
|
|
@ -1,46 +0,0 @@
|
|||
package org.dromara.basetost.handler;
|
||||
|
||||
import org.apache.dubbo.config.annotation.DubboReference;
|
||||
import org.dromara.common.core.domain.R;
|
||||
import org.dromara.system.api.RemoteDeviceService;
|
||||
import org.dromara.system.api.domain.bo.RemoteDeviceBo;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2021-09-29 9:34
|
||||
*/
|
||||
@Component
|
||||
public class AsyncHandler {
|
||||
|
||||
private Logger logger = LoggerFactory.getLogger(AsyncHandler.class);
|
||||
|
||||
@DubboReference
|
||||
private RemoteDeviceService deviceService;
|
||||
|
||||
|
||||
// @Async(value = "myCacheExecutor")
|
||||
boolean saveGpsAsync(String infoSource, List<RemoteDeviceBo> list) {
|
||||
R response = deviceService.saveDeviceToSt(infoSource,list);
|
||||
if(Objects.isNull(response) || Objects.isNull(response.getCode())){
|
||||
assert response != null;
|
||||
logger.info("返回null,message={}",response.getMsg());
|
||||
return false;
|
||||
}
|
||||
if(200 == response.getCode()){
|
||||
logger.info("success");
|
||||
}else{
|
||||
logger.info("fail,message={},data={}",response.getMsg(),response.getData());
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
@ -1,42 +0,0 @@
|
|||
package org.dromara.basetost.handler;
|
||||
|
||||
import cn.hutool.core.date.DateUtil;
|
||||
import org.dromara.basetost.entity.AbstractGpsEntity;
|
||||
import org.dromara.basetost.entity.Device;
|
||||
import org.dromara.basetost.repository.DeviceRepository;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2022-08-05 17:32
|
||||
*/
|
||||
@Component("pdthandler")
|
||||
public class PDTHandler extends AbstractAsyncHandler {
|
||||
|
||||
private Logger logger = LoggerFactory.getLogger(AbstractAsyncHandler.class);
|
||||
|
||||
@Autowired
|
||||
DeviceRepository deviceRepository;
|
||||
|
||||
public PDTHandler() {
|
||||
super();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected List<Device> getGpsInfoByTime(String lastUpdateTime) {
|
||||
|
||||
|
||||
return deviceRepository.findDeviceByUpdateTimeAfterOrderByUpdateTimeAsc(DateUtil.parse(lastUpdateTime));
|
||||
// return deviceRepository.findDeviceByUpdateTimeAfterOrderByUpdateTimeAsc(lastUpdateTime);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -1,90 +0,0 @@
|
|||
package org.dromara.basetost.handler;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2022-08-05 12:00
|
||||
*/
|
||||
|
||||
import cn.hutool.core.date.DateTime;
|
||||
import cn.hutool.core.date.DateUtil;
|
||||
import org.dromara.basetost.response.MyBusinessException;
|
||||
import org.dromara.common.core.utils.StringUtils;
|
||||
|
||||
import java.io.*;
|
||||
import java.util.Objects;
|
||||
import java.util.Properties;
|
||||
|
||||
public class PathUtil {
|
||||
|
||||
static String outpath = System.getProperty("user.dir")+File.separator+"conf"+File.separator;//先读取config目录的,没有再加载classpath的
|
||||
|
||||
private static Properties getProperties(String fileName) {
|
||||
|
||||
try {
|
||||
Properties properties = new Properties();
|
||||
InputStream in = new FileInputStream(new File(outpath + fileName));
|
||||
properties.load(in);
|
||||
return properties;
|
||||
} catch (IOException e) {
|
||||
try {
|
||||
Properties properties = new Properties();
|
||||
InputStream in = PathUtil.class.getClassLoader().getResourceAsStream(fileName);//默认加载classpath的
|
||||
if(Objects.isNull(in)){
|
||||
return null;
|
||||
}
|
||||
properties.load(in);
|
||||
return properties;
|
||||
} catch (IOException es) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* 更新properties文件的键值对
|
||||
* 如果该主键已经存在,更新该主键的值;
|
||||
* 如果该主键不存在,则插件一对键值。
|
||||
* @param keyname 键名
|
||||
* @param keyvalue 键值
|
||||
*/
|
||||
public static void updateProperties( String keyname, String keyvalue, String fileName) throws IOException {
|
||||
|
||||
// 调用 Hashtable 的方法 put,使用 getProperty 方法提供并行性。
|
||||
// 强制要求为属性的键和值使用字符串。返回值是 Hashtable 调用 put 的结果。
|
||||
Properties props = PathUtil.getProperties(fileName);
|
||||
OutputStream fos = new FileOutputStream(outpath + fileName);
|
||||
props.setProperty(keyname, keyvalue);
|
||||
// 以适合使用 load 方法加载到 Properties 表中的格式,
|
||||
// 将此 Properties 表中的属性列表(键和元素对)写入输出流
|
||||
props.store(fos, "Update '" + keyname + "' value");
|
||||
|
||||
}
|
||||
|
||||
public static String getValueFromProperties(String propertiesName) {
|
||||
Properties properties = PathUtil.getProperties("ruansi.properties");
|
||||
if(Objects.isNull(properties)){
|
||||
throw new MyBusinessException("jar包所在文件夹下conf子目录下缺少[ruansi.properties] 文件,请新建");
|
||||
}
|
||||
String lastUpdateTime = properties.getProperty(propertiesName);
|
||||
if(StringUtils.isEmpty(lastUpdateTime)){
|
||||
throw new MyBusinessException("[ruansi.properties]文件内缺少["+propertiesName+"]属性");
|
||||
}
|
||||
//checkTimeFormatter(lastUpdateTime);
|
||||
return lastUpdateTime;
|
||||
}
|
||||
|
||||
private static void checkTimeFormatter(String lastUpdateTime) {
|
||||
try {
|
||||
DateTime parse = DateUtil.parse(lastUpdateTime, "yyyy-MM-dd HH:mm:ss");
|
||||
}catch (Exception e){
|
||||
throw new RuntimeException(e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -3,6 +3,8 @@ package org.dromara.basetost.repository;
|
|||
import org.dromara.basetost.entity.Device;
|
||||
import org.springframework.data.jpa.repository.JpaRepository;
|
||||
import org.springframework.data.jpa.repository.JpaSpecificationExecutor;
|
||||
import org.springframework.data.jpa.repository.Query;
|
||||
import org.springframework.data.repository.query.Param;
|
||||
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
|
|
@ -10,6 +12,23 @@ import java.util.List;
|
|||
public interface DeviceRepository extends JpaRepository<Device, Integer>, JpaSpecificationExecutor<Device> {
|
||||
List<Device> findDeviceByUpdateTimeAfterOrderByUpdateTimeAsc(Date updateTime);
|
||||
|
||||
/**
|
||||
* 根据设备编码和信息来源查询设备
|
||||
* @param deviceCode 设备编码
|
||||
* @param infoSource 信息来源
|
||||
* @return 设备
|
||||
*/
|
||||
Device findByDeviceCodeAndInfoSource(String deviceCode, String infoSource);
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* 查询指定信息来源的最新更新时间
|
||||
* @param infoSource 信息来源
|
||||
* @return 最新更新时间
|
||||
*/
|
||||
@Query("SELECT MAX(d.updateTime) FROM Device d WHERE d.infoSource = :infoSource")
|
||||
Date findMaxUpdateTimeByInfoSource(@Param("infoSource") String infoSource);
|
||||
|
||||
// @Query(nativeQuery = true,value = "select * from t_device t1 " +
|
||||
// " where t1.update_time>?1 order by update_time asc limit 50")
|
||||
|
|
|
|||
|
|
@ -3,7 +3,21 @@ package org.dromara.basetost.repository;
|
|||
import org.dromara.basetost.entity.DictData;
|
||||
import org.springframework.data.jpa.repository.JpaRepository;
|
||||
import org.springframework.data.jpa.repository.JpaSpecificationExecutor;
|
||||
import org.springframework.data.jpa.repository.Query;
|
||||
import org.springframework.data.repository.query.Param;
|
||||
|
||||
public interface DictDataRepository extends JpaRepository<DictData, Long>, JpaSpecificationExecutor<DictData> {
|
||||
DictData findDictDataByDictTypeAndDictLabel(String dictType, String dictLabel);
|
||||
|
||||
@Query(value = "SELECT * FROM sys_dict_data WHERE dict_type = :dictType AND dict_label = :dictLabel",
|
||||
nativeQuery = true)
|
||||
DictData findByNativeSQL(@Param("dictType") String dictType, @Param("dictLabel") String dictLabel);
|
||||
|
||||
/**
|
||||
* 根据字典类型和字典值查询字典数据
|
||||
* @param dictType 字典类型
|
||||
* @param dictValue 字典值
|
||||
* @return 字典数据
|
||||
*/
|
||||
DictData findDictDataByDictTypeAndDictValue(String dictType, String dictValue);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,40 @@
|
|||
|
||||
package org.dromara.basetost.repository;
|
||||
|
||||
import org.dromara.basetost.entity.SourceDevice;
|
||||
import org.springframework.data.jpa.repository.JpaRepository;
|
||||
import org.springframework.data.jpa.repository.JpaSpecificationExecutor;
|
||||
import org.springframework.data.jpa.repository.Query;
|
||||
import org.springframework.data.repository.query.Param;
|
||||
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* 源设备数据访问接口
|
||||
*/
|
||||
public interface SourceDeviceRepository extends JpaRepository<SourceDevice, Integer>, JpaSpecificationExecutor<SourceDevice> {
|
||||
|
||||
/**
|
||||
* 根据更新时间查询设备数据
|
||||
* @param updateTime 更新时间
|
||||
* @return 设备列表
|
||||
*/
|
||||
List<SourceDevice> findByUpdateTimeAfterOrderByUpdateTimeAsc(Date updateTime);
|
||||
|
||||
/**
|
||||
* 根据设备编码和信息来源查询设备
|
||||
* @param deviceCode 设备编码
|
||||
* @param infoSource 信息来源
|
||||
* @return 设备
|
||||
*/
|
||||
SourceDevice findByDeviceCodeAndInfoSource(String deviceCode, String infoSource);
|
||||
|
||||
/**
|
||||
* 查询指定信息来源的最新更新时间
|
||||
* @param infoSource 信息来源
|
||||
* @return 最新更新时间
|
||||
*/
|
||||
@Query("SELECT MAX(d.updateTime) FROM SourceDevice d WHERE d.infoSource = :infoSource")
|
||||
Date findMaxUpdateTimeByInfoSource(@Param("infoSource") String infoSource);
|
||||
}
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
|
||||
package org.dromara.basetost.repository;
|
||||
|
||||
import org.dromara.basetost.entity.SourceDeviceV2;
|
||||
import org.springframework.data.jpa.repository.JpaRepository;
|
||||
import org.springframework.data.jpa.repository.JpaSpecificationExecutor;
|
||||
import org.springframework.data.jpa.repository.Query;
|
||||
import org.springframework.data.repository.query.Param;
|
||||
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* 源设备数据访问接口V2 - 第二种表结构
|
||||
*/
|
||||
public interface SourceDeviceV2Repository extends JpaRepository<SourceDeviceV2, Integer>, JpaSpecificationExecutor<SourceDeviceV2> {
|
||||
|
||||
/**
|
||||
* 根据更新时间查询设备数据
|
||||
* @param updateTime 更新时间
|
||||
* @return 设备列表
|
||||
*/
|
||||
List<SourceDeviceV2> findByUpdateTimeAfterOrderByUpdateTimeAsc(Date updateTime);
|
||||
|
||||
/**
|
||||
* 根据设备编码和信息来源查询设备
|
||||
* @param deviceCode 设备编码
|
||||
* @param infoSource 信息来源
|
||||
* @return 设备
|
||||
*/
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -1,13 +1,12 @@
|
|||
package org.dromara.basetost.schedule;
|
||||
|
||||
import org.dromara.basetost.handler.AbstractAsyncHandler;
|
||||
import org.dromara.basetost.service.DeviceSyncService;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.scheduling.annotation.Scheduled;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
* 数据同步调度任务
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2023-05-22 9:24
|
||||
|
|
@ -17,12 +16,14 @@ public class BaseToSTSchedule {
|
|||
|
||||
|
||||
@Autowired
|
||||
@Qualifier(value = "pdthandler")
|
||||
AbstractAsyncHandler abstractAsyncHandler;
|
||||
private DeviceSyncService deviceSyncService;
|
||||
|
||||
|
||||
@Scheduled(cron = "${devicecorn:0/30 * * * * ?}")
|
||||
public void sendToSt(){
|
||||
abstractAsyncHandler.saveBaseToST();
|
||||
/**
|
||||
* 从源数据库同步设备数据
|
||||
*/
|
||||
@Scheduled(cron = "${device-sync.cron:0 0/10 * * * ?}")
|
||||
public void syncDevicesFromSource() {
|
||||
deviceSyncService.syncDevices();
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,215 @@
|
|||
|
||||
package org.dromara.basetost.service;
|
||||
|
||||
import cn.hutool.core.bean.BeanUtil;
|
||||
import cn.hutool.core.date.DateUtil;
|
||||
import com.baomidou.dynamic.datasource.annotation.DS;
|
||||
import org.dromara.basetost.config.DeviceSyncConfig;
|
||||
import org.dromara.basetost.entity.Device;
|
||||
import org.dromara.basetost.entity.DictData;
|
||||
import org.dromara.basetost.entity.SourceDevice;
|
||||
import org.dromara.basetost.entity.SourceDeviceV2;
|
||||
import org.dromara.basetost.repository.DeviceRepository;
|
||||
import org.dromara.basetost.repository.DictDataRepository;
|
||||
import org.dromara.basetost.repository.SourceDeviceRepository;
|
||||
import org.dromara.basetost.repository.SourceDeviceV2Repository;
|
||||
import org.dromara.basetost.util.DataSourceContextHolder;
|
||||
import org.dromara.basetost.util.FieldMapper;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.transaction.annotation.Transactional;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* 设备数据同步服务
|
||||
*/
|
||||
@Service
|
||||
public class DeviceSyncService {
|
||||
|
||||
private static final Logger logger = LoggerFactory.getLogger(DeviceSyncService.class);
|
||||
|
||||
@Autowired
|
||||
private DeviceSyncConfig deviceSyncConfig;
|
||||
|
||||
@Autowired
|
||||
private DeviceRepository deviceRepository;
|
||||
|
||||
@Autowired
|
||||
private SourceDeviceRepository sourceDeviceRepository;
|
||||
|
||||
@Autowired
|
||||
private SourceDeviceV2Repository sourceDeviceV2Repository;
|
||||
|
||||
@Autowired
|
||||
private DictDataRepository dictDataRepository;
|
||||
|
||||
/**
|
||||
* 从源数据库同步设备数据
|
||||
*/
|
||||
@DS("target")
|
||||
public void syncDevices() {
|
||||
logger.info("开始同步设备数据,infoSource: {}", deviceSyncConfig.getInfoSource());
|
||||
|
||||
try {
|
||||
// 1. 查询目标表中该info_source的最新更新时间
|
||||
Date lastUpdateTime = deviceRepository.findMaxUpdateTimeByInfoSource(deviceSyncConfig.getInfoSource());
|
||||
if (lastUpdateTime == null) {
|
||||
// 如果没有数据,使用一个较早的时间
|
||||
lastUpdateTime = DateUtil.parse("2020-01-01 00:00:00");
|
||||
logger.info("目标表中无该info_source的数据,使用初始时间: {}", lastUpdateTime);
|
||||
} else {
|
||||
logger.info("目标表中该info_source的最新更新时间: {}", lastUpdateTime);
|
||||
}
|
||||
|
||||
// 2. 从源数据库查询更新的数据
|
||||
List<SourceDevice> sourceDevices = getUpdatedDevicesFromSource(lastUpdateTime);
|
||||
logger.info("从源数据库查询到 {} 条更新的数据", sourceDevices.size());
|
||||
|
||||
if (CollectionUtils.isEmpty(sourceDevices)) {
|
||||
logger.info("源数据库无更新的数据");
|
||||
return;
|
||||
}
|
||||
|
||||
// 3. 处理设备类型映射
|
||||
mapDeviceTypes(sourceDevices);
|
||||
|
||||
// 4. 同步数据到目标表
|
||||
syncDevicesToTarget(sourceDevices);
|
||||
|
||||
logger.info("同步设备数据完成,infoSource: {}", deviceSyncConfig.getInfoSource());
|
||||
} catch (Exception e) {
|
||||
logger.error("同步设备数据失败: {}", e.getMessage(), e);
|
||||
throw new RuntimeException("同步设备数据失败", e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 从源数据库查询更新的数据
|
||||
* @param lastUpdateTime 最后更新时间
|
||||
* @return 更新的设备列表
|
||||
*/
|
||||
public List<SourceDevice> getUpdatedDevicesFromSource(Date lastUpdateTime) {
|
||||
// 切换到源数据源
|
||||
DataSourceContextHolder.setDataSource("source");
|
||||
try {
|
||||
// 根据源表结构类型查询更新的数据
|
||||
if ("v2".equals(deviceSyncConfig.getSourceTableType())) {
|
||||
// 使用第二种表结构
|
||||
List<SourceDeviceV2> v2Devices = sourceDeviceV2Repository.findByUpdateTimeAfterOrderByUpdateTimeAsc(lastUpdateTime);
|
||||
// 转换为SourceDevice
|
||||
return convertV2ToV1(v2Devices);
|
||||
} else {
|
||||
// 使用第一种表结构
|
||||
return sourceDeviceRepository.findByUpdateTimeAfterOrderByUpdateTimeAsc(lastUpdateTime);
|
||||
}
|
||||
} finally {
|
||||
// 恢复到默认数据源
|
||||
DataSourceContextHolder.clearDataSource();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 将SourceDeviceV2转换为SourceDevice
|
||||
* @param v2Devices SourceDeviceV2列表
|
||||
* @return SourceDevice列表
|
||||
*/
|
||||
private List<SourceDevice> convertV2ToV1(List<SourceDeviceV2> v2Devices) {
|
||||
if (CollectionUtils.isEmpty(v2Devices)) {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
|
||||
List<SourceDevice> v1Devices = new ArrayList<>();
|
||||
for (SourceDeviceV2 v2Device : v2Devices) {
|
||||
SourceDevice v1Device = new SourceDevice();
|
||||
// 复制相同名称的字段
|
||||
BeanUtil.copyProperties(v2Device, v1Device);
|
||||
v1Devices.add(v1Device);
|
||||
}
|
||||
return v1Devices;
|
||||
}
|
||||
|
||||
/**
|
||||
* 处理设备类型映射
|
||||
* @param sourceDevices 源设备列表
|
||||
*/
|
||||
@DS("source")
|
||||
private void mapDeviceTypes(List<SourceDevice> sourceDevices) {
|
||||
DataSourceContextHolder.setDataSource("source");
|
||||
try {
|
||||
for (SourceDevice sourceDevice : sourceDevices) {
|
||||
String sourceDeviceType = sourceDevice.getDeviceType();
|
||||
if (sourceDeviceType == null || sourceDeviceType.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// 从字典表查询设备类型映射
|
||||
// dict_value是源表的device_type值,dict_label是目标表的device_type值
|
||||
DictData dictData = dictDataRepository.findByNativeSQL(
|
||||
deviceSyncConfig.getDictType(), sourceDeviceType);
|
||||
String targetDeviceType = "99"; // 默认值为"99"(其他)
|
||||
if (!Objects.isNull(dictData) && dictData.getDictValue() != null) {
|
||||
targetDeviceType = dictData.getDictValue();
|
||||
}
|
||||
|
||||
sourceDevice.setDeviceType(targetDeviceType);
|
||||
}
|
||||
logger.info("已处理完类型转换数据:{}",sourceDevices.size());
|
||||
}finally {
|
||||
DataSourceContextHolder.clearDataSource();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* 同步设备数据到目标表
|
||||
* @param sourceDevices 源设备列表
|
||||
*/
|
||||
@DS("target")
|
||||
public void syncDevicesToTarget(List<SourceDevice> sourceDevices) {
|
||||
if (CollectionUtils.isEmpty(sourceDevices)) {
|
||||
return;
|
||||
}
|
||||
|
||||
int batchSize = deviceSyncConfig.getBatchSize();
|
||||
int size = sourceDevices.size();
|
||||
int batchCount = (size + batchSize - 1) / batchSize;
|
||||
|
||||
for (int i = 0; i < batchCount; i++) {
|
||||
int fromIndex = i * batchSize;
|
||||
int toIndex = Math.min((i + 1) * batchSize, size);
|
||||
List<SourceDevice> batch = sourceDevices.subList(fromIndex, toIndex);
|
||||
|
||||
// 批量处理
|
||||
for (SourceDevice sourceDevice : batch) {
|
||||
// 查询目标表中是否存在该设备
|
||||
Device existingDevice = deviceRepository.findByDeviceCodeAndInfoSource(
|
||||
sourceDevice.getDeviceCode(), deviceSyncConfig.getInfoSource());
|
||||
|
||||
// 转换为目标设备
|
||||
Device targetDevice = FieldMapper.mapToTargetDevice(sourceDevice, deviceSyncConfig.getInfoSource());
|
||||
if ("05".equals(targetDevice.getDeviceType()) && targetDevice.getDeviceCode().length()!=20){
|
||||
targetDevice.setGbbm(targetDevice.getRemark1());
|
||||
}else if ("05".equals(targetDevice.getDeviceType())){
|
||||
targetDevice.setGbbm(targetDevice.getDeviceCode());
|
||||
}
|
||||
if (existingDevice != null) {
|
||||
// 更新现有设备
|
||||
targetDevice.setId(existingDevice.getId());
|
||||
deviceRepository.save(targetDevice);
|
||||
} else {
|
||||
// 插入新设备
|
||||
deviceRepository.save(targetDevice);
|
||||
}
|
||||
}
|
||||
|
||||
logger.info("已处理 {}/{} 批次数据", i + 1, batchCount);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
|
||||
package org.dromara.basetost.util;
|
||||
|
||||
import com.baomidou.dynamic.datasource.toolkit.DynamicDataSourceContextHolder;
|
||||
|
||||
/**
|
||||
* 数据源切换工具类
|
||||
*/
|
||||
public class DataSourceContextHolder {
|
||||
|
||||
/**
|
||||
* 切换数据源
|
||||
* @param dataSourceName 数据源名称
|
||||
*/
|
||||
public static void setDataSource(String dataSourceName) {
|
||||
DynamicDataSourceContextHolder.push(dataSourceName);
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取当前数据源
|
||||
* @return 数据源名称
|
||||
*/
|
||||
public static String getDataSource() {
|
||||
return DynamicDataSourceContextHolder.peek();
|
||||
}
|
||||
|
||||
/**
|
||||
* 清除数据源
|
||||
*/
|
||||
public static void clearDataSource() {
|
||||
DynamicDataSourceContextHolder.clear();
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
|
||||
package org.dromara.basetost.util;
|
||||
|
||||
import cn.hutool.core.bean.BeanUtil;
|
||||
import org.dromara.basetost.entity.Device;
|
||||
import org.dromara.basetost.entity.SourceDevice;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* 字段映射工具类
|
||||
* 用于处理源表和目标表之间的字段映射
|
||||
*/
|
||||
public class FieldMapper {
|
||||
|
||||
private static final Logger logger = LoggerFactory.getLogger(FieldMapper.class);
|
||||
|
||||
/**
|
||||
* 将源设备转换为目标设备
|
||||
* @param sourceDevice 源设备
|
||||
* @param infoSource 信息来源
|
||||
* @return 目标设备
|
||||
*/
|
||||
public static Device mapToTargetDevice(SourceDevice sourceDevice, String infoSource) {
|
||||
Device targetDevice = new Device();
|
||||
|
||||
try {
|
||||
// 复制相同名称的字段
|
||||
BeanUtil.copyProperties(sourceDevice, targetDevice);
|
||||
|
||||
// 设置info_source
|
||||
targetDevice.setInfoSource(infoSource);
|
||||
|
||||
// 处理字段名差异
|
||||
if (sourceDevice.getPoliceNo() != null) {
|
||||
targetDevice.setPoliceNo(sourceDevice.getPoliceNo());
|
||||
}
|
||||
if (sourceDevice.getPoliceName() != null) {
|
||||
targetDevice.setPoliceName(sourceDevice.getPoliceName());
|
||||
}
|
||||
if (sourceDevice.getPhoneNum() != null) {
|
||||
targetDevice.setPhoneNum(sourceDevice.getPhoneNum());
|
||||
}
|
||||
if (sourceDevice.getCarNum() != null) {
|
||||
targetDevice.setCarNum(sourceDevice.getCarNum());
|
||||
}
|
||||
|
||||
} catch (Exception e) {
|
||||
logger.error("字段映射失败: {}", e.getMessage(), e);
|
||||
}
|
||||
|
||||
return targetDevice;
|
||||
}
|
||||
|
||||
/**
|
||||
* 将源设备列表转换为目标设备列表
|
||||
* @param sourceDevices 源设备列表
|
||||
* @param infoSource 信息来源
|
||||
* @return 目标设备列表
|
||||
*/
|
||||
public static java.util.List<Device> mapToTargetDevices(java.util.List<SourceDevice> sourceDevices, String infoSource) {
|
||||
return sourceDevices.stream()
|
||||
.map(sourceDevice -> mapToTargetDevice(sourceDevice, infoSource))
|
||||
.collect(java.util.stream.Collectors.toList());
|
||||
}
|
||||
}
|
||||
|
|
@ -8,38 +8,72 @@ spring:
|
|||
application:
|
||||
# 应用名称
|
||||
name: stwzhj-basetost
|
||||
profiles:
|
||||
# 环境配置
|
||||
active: dev
|
||||
|
||||
--- # nacos 配置
|
||||
spring:
|
||||
datasource:
|
||||
url: jdbc:mysql://localhost:3306/wzhj_hs?useUnicode=true&characterEncoding=utf-8&serverTimezone=Asia/Shanghai&useSSL=false
|
||||
dynamic:
|
||||
primary: target #设置默认数据源为目标数据库
|
||||
strict: false #严格匹配数据源
|
||||
datasource:
|
||||
# 源数据库(MySQL) - 每个地市配置不同的源数据库
|
||||
source:
|
||||
url: jdbc:mysql://53.248.2.141:3306/wzhj-bz?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=false&serverTimezone=GMT%2B8&rewriteBatchedStatements=true&allowPublicKeyRetrieval=true
|
||||
username: root
|
||||
password: root
|
||||
driver-class-name: com.mysql.jdbc.Driver
|
||||
type: com.zaxxer.hikari.HikariDataSource
|
||||
password: Ycgis!2509
|
||||
driver-class-name: com.mysql.cj.jdbc.Driver
|
||||
hikari:
|
||||
maximum-pool-size: 3 # 每个实例最多3个连接
|
||||
minimum-idle: 1 # 最少1个空闲连接
|
||||
idle-timeout: 300000 # 5分钟空闲超时
|
||||
connection-timeout: 30000
|
||||
max-lifetime: 600000 # 10分钟生命周期
|
||||
|
||||
# 目标数据库(PostgreSQL) - 所有地市使用相同的目标数据库
|
||||
target:
|
||||
url: jdbc:postgresql://53.16.17.15:5432/wzhj?useUnicode=true&characterEncoding=utf8&useSSL=true&autoReconnect=true&reWriteBatchedInserts=true&stringtype=unspecified
|
||||
username: pgsql
|
||||
password: ycgis
|
||||
driver-class-name: org.postgresql.Driver
|
||||
hikari:
|
||||
maximum-pool-size: 3 # 每个实例最多3个连接
|
||||
minimum-idle: 1
|
||||
idle-timeout: 300000
|
||||
connection-timeout: 30000
|
||||
max-lifetime: 600000
|
||||
jpa:
|
||||
show-sql: true
|
||||
show-sql: false
|
||||
hibernate:
|
||||
naming:
|
||||
physical-strategy: org.hibernate.boot.model.naming.PhysicalNamingStrategyStandardImpl
|
||||
cloud:
|
||||
nacos:
|
||||
# nacos 服务地址
|
||||
server-addr: 127.0.0.1:8848
|
||||
username: nacos
|
||||
password: nacos
|
||||
discovery:
|
||||
# 注册组
|
||||
group: DEFAULT_GROUP
|
||||
namespace: ${spring.profiles.active}
|
||||
config:
|
||||
# 配置组
|
||||
group: DEFAULT_GROUP
|
||||
namespace: ${spring.profiles.active}
|
||||
config:
|
||||
import:
|
||||
- optional:nacos:application-common.yml
|
||||
properties:
|
||||
hibernate:
|
||||
format_sql: true # 格式化 SQL,便于阅读
|
||||
use_sql_comments: true # 添加注释,显示这是什么操作
|
||||
dialect: org.hibernate.dialect.PostgreSQLDialect
|
||||
|
||||
# 设备数据同步配置
|
||||
device-sync:
|
||||
# 信息来源标识 - 每个地市配置不同的info_source
|
||||
info-source: "3416"
|
||||
# 源表结构类型 - v1或v2
|
||||
# v1: 第一种表结构,字段使用下划线命名(如police_no)
|
||||
# v2: 第二种表结构,部分字段使用驼峰命名(如policeNo)
|
||||
source-table-type: v1
|
||||
# 设备类型映射字典类型
|
||||
dict-type: device_type_tost
|
||||
# 批量插入大小
|
||||
batch-size: 50
|
||||
# 定时任务cron表达式
|
||||
cron: 0 0/10 * * * ?
|
||||
|
||||
# 日志配置
|
||||
logging:
|
||||
level:
|
||||
org.springframework: warn
|
||||
org.mybatis.spring.mapper: error
|
||||
org.springframework.context.support.PostProcessorRegistrationDelegate: error
|
||||
# JPA 日志配置
|
||||
org.hibernate.SQL: debug
|
||||
org.hibernate.orm.jdbc.bind: trace # Hibernate 6
|
||||
# 如果你还在使用 Hibernate 5,取消下面的注释
|
||||
# org.hibernate.type.descriptor.sql.BasicBinder: trace
|
||||
config: classpath:logback-plus.xml
|
||||
|
||||
|
|
|
|||
|
|
@ -1,28 +1,49 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<configuration scan="true" scanPeriod="60 seconds" debug="false">
|
||||
<!-- 日志存放路径 -->
|
||||
<property name="log.path" value="logs/${project.artifactId}" />
|
||||
<property name="log.path" value="logs" />
|
||||
<property name="log.file" value="base2st" />
|
||||
<property name="MAX_FILE_SIZE" value="50MB" />
|
||||
<property name="MAX_HISTORY" value="30" />
|
||||
<!-- 日志输出格式 -->
|
||||
<property name="console.log.pattern"
|
||||
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
|
||||
|
||||
<!-- 控制台输出 -->
|
||||
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<!-- INFO日志Appender -->
|
||||
<appender name="FILE_INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.path}/info.${log.file}.log</file>
|
||||
<filter class="ch.qos.logback.classic.filter.LevelFilter">
|
||||
<level>INFO</level>
|
||||
<onMatch>ACCEPT</onMatch>
|
||||
<onMismatch>DENY</onMismatch>
|
||||
</filter>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.path}/info/info.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
|
||||
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
|
||||
<maxHistory>${MAX_HISTORY}</maxHistory>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>${console.log.pattern}</pattern>
|
||||
<charset>utf-8</charset>
|
||||
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<include resource="logback-common.xml" />
|
||||
<!-- ERROR日志Appender -->
|
||||
<appender name="FILE_ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.path}/error.${log.file}.log</file>
|
||||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||
<level>ERROR</level>
|
||||
</filter>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.path}/error/error.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
|
||||
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
|
||||
<maxHistory>${MAX_HISTORY}</maxHistory>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<include resource="logback-logstash.xml" />
|
||||
|
||||
<!-- 开启 skywalking 日志收集 -->
|
||||
<include resource="logback-skylog.xml" />
|
||||
|
||||
<!--系统操作日志-->
|
||||
<root level="info">
|
||||
<appender-ref ref="console" />
|
||||
<!-- 根Logger配置(禁用控制台输出) -->
|
||||
<root level="INFO">
|
||||
<appender-ref ref="FILE_INFO" />
|
||||
<appender-ref ref="FILE_ERROR" />
|
||||
</root>
|
||||
|
||||
</configuration>
|
||||
|
|
|
|||
|
|
@ -96,11 +96,6 @@
|
|||
<artifactId>stwzhj-api-data2es</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
<version>2.4.0-hw-ei-302002</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>com.ruansee.app</groupId>
|
||||
|
|
@ -137,16 +132,48 @@
|
|||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.springframework.kafka</groupId>
|
||||
<artifactId>spring-kafka</artifactId>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.12</artifactId>
|
||||
<version>3.6.1-h0.cbu.mrs.350.r11</version>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>org.apache.zookeeper</groupId>
|
||||
<artifactId>zookeeper</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>net.sf.jopt-simple</groupId>
|
||||
<artifactId>jopt-simple</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>com.huawei.mrs</groupId>
|
||||
<artifactId>manager-wc2frm</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.xerial.snappy</groupId>
|
||||
<artifactId>snappy-java</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>com.huawei.mrs</groupId>
|
||||
<artifactId>om-controller-api</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>com.101tec</groupId>
|
||||
<artifactId>zkclient</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
<version>3.6.1-h0.cbu.mrs.350.r11</version>
|
||||
</dependency>
|
||||
|
||||
|
||||
|
||||
</dependencies>
|
||||
|
||||
|
|
|
|||
|
|
@ -4,7 +4,6 @@ import com.ruansee.redis.JedisConfig;
|
|||
import com.ruansee.redis.RedisConfig;
|
||||
import com.ruansee.redis.RedisUtil;
|
||||
import com.ruansee.redis.RedissionLockUtil;
|
||||
import org.dromara.kafka.consumer.config.KafkaPropertiesConfig;
|
||||
import org.redisson.spring.starter.RedissonAutoConfiguration;
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||
|
|
@ -25,7 +24,6 @@ import org.springframework.scheduling.annotation.EnableAsync;
|
|||
*/
|
||||
@SpringBootApplication
|
||||
@EnableAsync
|
||||
@EnableConfigurationProperties({KafkaPropertiesConfig.class})
|
||||
@ServletComponentScan
|
||||
public class KafkaConsumerApplication {
|
||||
public static void main(String[] args){
|
||||
|
|
|
|||
|
|
@ -13,9 +13,10 @@ public final class KafkaProperties
|
|||
private static final Logger LOG = LoggerFactory.getLogger(KafkaProperties.class);
|
||||
|
||||
// Topic名称,安全模式下,需要以管理员用户添加当前用户的访问权限
|
||||
public final static String TOPIC = "t_gps_realtime";
|
||||
public final static String TOPIC = "jysb_dwxx";
|
||||
|
||||
private static Properties serverProps = new Properties();
|
||||
|
||||
private static Properties producerProps = new Properties();
|
||||
|
||||
private static Properties consumerProps = new Properties();
|
||||
|
|
@ -26,8 +27,9 @@ public final class KafkaProperties
|
|||
|
||||
private KafkaProperties()
|
||||
{
|
||||
String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
|
||||
|
||||
// String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
|
||||
String filePath = "/home/rsoft/config/";
|
||||
LOG.info("路径=={}",filePath);
|
||||
try
|
||||
{
|
||||
File proFile = new File(filePath + "producer.properties");
|
||||
|
|
|
|||
|
|
@ -1,35 +0,0 @@
|
|||
package org.dromara.kafka.consumer.config;
|
||||
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.context.annotation.Profile;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2021-09-06 15:13
|
||||
*/
|
||||
@ConfigurationProperties(prefix = "mykafka")
|
||||
@Profile(value = "dev")
|
||||
public
|
||||
class KafkaPropertiesConfig {
|
||||
private String serverUrl;
|
||||
|
||||
private MyConsumerProperties consumerProperties = new MyConsumerProperties();
|
||||
|
||||
public String getServerUrl() {
|
||||
return serverUrl;
|
||||
}
|
||||
|
||||
public void setServerUrl(String serverUrl) {
|
||||
this.serverUrl = serverUrl;
|
||||
}
|
||||
|
||||
public MyConsumerProperties getConsumerProperties() {
|
||||
return consumerProperties;
|
||||
}
|
||||
|
||||
public void setConsumerProperties(MyConsumerProperties consumerProperties) {
|
||||
this.consumerProperties = consumerProperties;
|
||||
}
|
||||
}
|
||||
|
|
@ -4,17 +4,19 @@ import org.slf4j.Logger;
|
|||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.util.Properties;
|
||||
|
||||
public class LoginUtil
|
||||
{
|
||||
public class LoginUtil {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(LoginUtil.class);
|
||||
|
||||
static Logger logger = LoggerFactory.getLogger(LoginUtil.class);
|
||||
|
||||
public enum Module
|
||||
{
|
||||
STORM("StormClient"), KAFKA("KafkaClient"), ZOOKEEPER("Client");
|
||||
/**
|
||||
* no JavaDoc
|
||||
*/
|
||||
public enum Module {
|
||||
KAFKA("KafkaClient"), ZOOKEEPER("Client");
|
||||
|
||||
private String name;
|
||||
|
||||
|
|
@ -77,8 +79,7 @@ public class LoginUtil
|
|||
* @throws IOException
|
||||
*/
|
||||
public static void setJaasFile(String principal, String keytabPath)
|
||||
throws IOException
|
||||
{
|
||||
throws IOException {
|
||||
String jaasPath =
|
||||
new File(System.getProperty("java.io.tmpdir")) + File.separator + System.getProperty("user.name")
|
||||
+ JAAS_POSTFIX;
|
||||
|
|
@ -88,7 +89,6 @@ public class LoginUtil
|
|||
// 删除jaas文件
|
||||
deleteJaasFile(jaasPath);
|
||||
writeJaasFile(jaasPath, principal, keytabPath);
|
||||
logger.error("jaasPath--{}",jaasPath);
|
||||
System.setProperty(JAVA_SECURITY_LOGIN_CONF, jaasPath);
|
||||
}
|
||||
|
||||
|
|
@ -99,8 +99,7 @@ public class LoginUtil
|
|||
* @throws IOException
|
||||
*/
|
||||
public static void setZookeeperServerPrincipal(String zkServerPrincipal)
|
||||
throws IOException
|
||||
{
|
||||
throws IOException {
|
||||
System.setProperty(ZOOKEEPER_AUTH_PRINCIPAL, zkServerPrincipal);
|
||||
String ret = System.getProperty(ZOOKEEPER_AUTH_PRINCIPAL);
|
||||
if (ret == null)
|
||||
|
|
@ -120,8 +119,7 @@ public class LoginUtil
|
|||
* @throws IOException
|
||||
*/
|
||||
public static void setKrb5Config(String krb5ConfFile)
|
||||
throws IOException
|
||||
{
|
||||
throws IOException {
|
||||
System.setProperty(JAVA_SECURITY_KRB5_CONF, krb5ConfFile);
|
||||
String ret = System.getProperty(JAVA_SECURITY_KRB5_CONF);
|
||||
if (ret == null)
|
||||
|
|
@ -141,8 +139,7 @@ public class LoginUtil
|
|||
* 写文件异常
|
||||
*/
|
||||
private static void writeJaasFile(String jaasPath, String principal, String keytabPath)
|
||||
throws IOException
|
||||
{
|
||||
throws IOException {
|
||||
FileWriter writer = new FileWriter(new File(jaasPath));
|
||||
try
|
||||
{
|
||||
|
|
@ -160,8 +157,7 @@ public class LoginUtil
|
|||
}
|
||||
|
||||
private static void deleteJaasFile(String jaasPath)
|
||||
throws IOException
|
||||
{
|
||||
throws IOException {
|
||||
File jaasFile = new File(jaasPath);
|
||||
if (jaasFile.exists())
|
||||
{
|
||||
|
|
@ -172,8 +168,7 @@ public class LoginUtil
|
|||
}
|
||||
}
|
||||
|
||||
private static String getJaasConfContext(String principal, String keytabPath)
|
||||
{
|
||||
private static String getJaasConfContext(String principal, String keytabPath) {
|
||||
Module[] allModule = Module.values();
|
||||
StringBuilder builder = new StringBuilder();
|
||||
for (Module modlue : allModule)
|
||||
|
|
@ -183,11 +178,9 @@ public class LoginUtil
|
|||
return builder.toString();
|
||||
}
|
||||
|
||||
private static String getModuleContext(String userPrincipal, String keyTabPath, Module module)
|
||||
{
|
||||
private static String getModuleContext(String userPrincipal, String keyTabPath, Module module) {
|
||||
StringBuilder builder = new StringBuilder();
|
||||
if (IS_IBM_JDK)
|
||||
{
|
||||
if (IS_IBM_JDK) {
|
||||
builder.append(module.getName()).append(" {").append(LINE_SEPARATOR);
|
||||
builder.append(IBM_LOGIN_MODULE).append(LINE_SEPARATOR);
|
||||
builder.append("credsType=both").append(LINE_SEPARATOR);
|
||||
|
|
@ -195,9 +188,7 @@ public class LoginUtil
|
|||
builder.append("useKeytab=\"" + keyTabPath + "\"").append(LINE_SEPARATOR);
|
||||
builder.append("debug=true;").append(LINE_SEPARATOR);
|
||||
builder.append("};").append(LINE_SEPARATOR);
|
||||
}
|
||||
else
|
||||
{
|
||||
} else {
|
||||
builder.append(module.getName()).append(" {").append(LINE_SEPARATOR);
|
||||
builder.append(SUN_LOGIN_MODULE).append(LINE_SEPARATOR);
|
||||
builder.append("useKeyTab=true").append(LINE_SEPARATOR);
|
||||
|
|
@ -211,4 +202,58 @@ public class LoginUtil
|
|||
|
||||
return builder.toString();
|
||||
}
|
||||
|
||||
public static void securityPrepare(String principal, String keyTabFile) throws IOException {
|
||||
// String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
|
||||
String filePath = "/home/rsoft/config/";
|
||||
String krbFile = filePath + "krb5.conf";
|
||||
String userKeyTableFile = filePath + keyTabFile;
|
||||
|
||||
// windows路径下分隔符替换
|
||||
userKeyTableFile = userKeyTableFile.replace("\\", "\\\\");
|
||||
krbFile = krbFile.replace("\\", "\\\\");
|
||||
|
||||
LoginUtil.setKrb5Config(krbFile);
|
||||
LoginUtil.setZookeeperServerPrincipal("zookeeper/hadoop.hadoop.com");
|
||||
LoginUtil.setJaasFile(principal, userKeyTableFile);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check security mode
|
||||
*
|
||||
* @return boolean
|
||||
*/
|
||||
public static Boolean isSecurityModel() {
|
||||
Boolean isSecurity = false;
|
||||
// String krbFilePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator + "kafkaSecurityMode";
|
||||
String krbFilePath = "/home/rsoft/config/kafkaSecurityMode";
|
||||
Properties securityProps = new Properties();
|
||||
|
||||
// file does not exist.
|
||||
if (!isFileExists(krbFilePath)) {
|
||||
return isSecurity;
|
||||
}
|
||||
|
||||
try {
|
||||
securityProps.load(new FileInputStream(krbFilePath));
|
||||
|
||||
if ("yes".equalsIgnoreCase(securityProps.getProperty("kafka.client.security.mode")))
|
||||
{
|
||||
isSecurity = true;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOG.info("The Exception occured : {}.", e);
|
||||
}
|
||||
|
||||
return isSecurity;
|
||||
}
|
||||
|
||||
/*
|
||||
* 判断文件是否存在
|
||||
*/
|
||||
private static boolean isFileExists(String fileName) {
|
||||
File file = new File(fileName);
|
||||
|
||||
return file.exists();
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,28 +0,0 @@
|
|||
package org.dromara.kafka.consumer.config;
|
||||
|
||||
/**
 * Holder for Kafka consumer settings: client id and consumer group id.
 *
 * @author chenle
 * @date 2021-09-07 14:54
 */
public class MyConsumerProperties {

    // Consumer group id; defaults to "222" when not configured externally.
    private String groupId = "222";

    // Kafka client id; remains null until explicitly set.
    private String clientId;

    public String getClientId() {
        return this.clientId;
    }

    public void setClientId(String clientId) {
        this.clientId = clientId;
    }

    public String getGroupId() {
        return this.groupId;
    }

    public void setGroupId(String groupId) {
        this.groupId = groupId;
    }
}
|
||||
|
|
@ -1,159 +0,0 @@
|
|||
package org.dromara.kafka.consumer.config;
|
||||
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.dromara.kafka.consumer.handler.KafkaSecurityUtil;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.Properties;
|
||||
|
||||
|
||||
/**
 * Standalone Kafka consumer demo for a Kerberos-secured (Huawei MRS style) cluster.
 *
 * <p>NOTE(review): several aspects of this class are suspect and worth
 * confirming before reuse — see the notes on {@link #start()},
 * {@link #doWork()} and {@code shutdown()} below.</p>
 */
public class NewConsumer extends Thread{
    private static final Logger LOG = LoggerFactory.getLogger(NewConsumer.class);

    // Underlying Kafka consumer; created once in the constructor.
    private final KafkaConsumer<Integer, String> consumer;

    // Topic this consumer subscribes to.
    private final String topic;

    // Maximum wait time for a single poll request, in milliseconds.
    private final int waitTime = 10000;

    // Broker connection address property key.
    private final String bootstrapServers = "bootstrap.servers";
    // Group id property key.
    private final String groupId = "group.id";
    // Deserializer class property key for message values.
    private final String valueDeserializer = "value.deserializer";
    // Deserializer class property key for message keys.
    private final String keyDeserializer = "key.deserializer";
    // Security protocol property key: SASL_PLAINTEXT or PLAINTEXT.
    private final String securityProtocol = "security.protocol";
    // Kerberos service name property key.
    private final String saslKerberosServiceName = "sasl.kerberos.service.name";
    // Kerberos domain name property key.
    private final String kerberosDomainName = "kerberos.domain.name";
    // Whether to auto-commit offsets.
    private final String enableAutoCommit = "enable.auto.commit";
    // Auto-commit interval property key.
    private final String autoCommitIntervalMs = "auto.commit.interval.ms";

    // Session timeout property key.
    private final String sessionTimeoutMs = "session.timeout.ms";

    /**
     * Keytab file name of the machine-machine account requested by the user.
     */
    private static final String USER_KEYTAB_FILE = "user.keytab";

    /**
     * Name of the machine-machine account requested by the user.
     */
    private static final String USER_PRINCIPAL = "aqdsj_ruansi";

    /**
     * NewConsumer constructor.
     *
     * <p>Builds the consumer configuration (falling back to the hard-coded
     * defaults below when KafkaProperties has no value) and creates the
     * {@link KafkaConsumer}. Does not subscribe yet — see {@link #doWork()}.</p>
     *
     * @param topic name of the topic to subscribe to
     */
    public NewConsumer(String topic) {

        Properties props = new Properties();

        KafkaProperties kafkaProc = KafkaProperties.getInstance();
        // Broker connection address.
        props.put(bootstrapServers,
            kafkaProc.getValues(bootstrapServers, "localhost:21007"));
        // Group id.
        props.put(groupId, "DemoConsumer");
        // Whether offsets are auto-committed.
        props.put(enableAutoCommit, "true");
        // Auto-commit interval.
        props.put(autoCommitIntervalMs, "1000");
        // Session timeout.
        props.put(sessionTimeoutMs, "30000");
        // Key deserializer class.
        props.put(keyDeserializer,
            "org.apache.kafka.common.serialization.IntegerDeserializer");
        // Value deserializer class.
        props.put(valueDeserializer,
            "org.apache.kafka.common.serialization.StringDeserializer");
        // Security protocol type.
        props.put(securityProtocol, kafkaProc.getValues(securityProtocol, "SASL_PLAINTEXT"));
        // Kerberos service name.
        props.put(saslKerberosServiceName, "kafka");
        // Kerberos domain name.
        props.put(kerberosDomainName, kafkaProc.getValues(kerberosDomainName, "hadoop.hadoop.com"));
        consumer = new KafkaConsumer<Integer, String>(props);
        this.topic = topic;
    }

    /**
     * Subscribes to the topic and processes messages.
     *
     * <p>NOTE(review): this performs a single poll and logs each record — it is
     * not a consume loop. Callers that expect continuous consumption must call
     * it repeatedly.</p>
     */
    public void doWork()
    {
        // Subscribe to the topic.
        consumer.subscribe(Collections.singletonList(this.topic));
        // One poll request (waits up to waitTime ms).
        ConsumerRecords<Integer, String> records = consumer.poll(waitTime);
        // Log each received record.
        for (ConsumerRecord<Integer, String> record : records)
        {
            LOG.info("[NewConsumerExample], Received message: (" + record.key() + ", " + record.value()
                + ") at offset " + record.offset());
        }
    }



    public static void main(String[] args)
    {
        if (KafkaSecurityUtil.isSecurityModel())
        {
            try
            {
                LOG.info("Securitymode start.");

                // NOTE: with security enabled, this must be changed to the
                // machine-machine account the user actually requested.
                KafkaSecurityUtil.securityPrepare();
            }
            catch (IOException e)
            {
                LOG.error("Security prepare failure.");
                LOG.error("The IOException occured : {}.", e);
                return;
            }
            LOG.info("Security prepare success.");
        }

        NewConsumer consumerThread = new NewConsumer(KafkaProperties.TOPIC);
        consumerThread.start();

        // Close the consumer after 60s; adjust as needed in real deployments.
        try
        {
            Thread.sleep(60000);
        }
        catch (InterruptedException e)
        {
            LOG.info("The InterruptedException occured : {}.", e);
        }
        finally
        {
            consumerThread.shutdown();
            consumerThread.consumer.close();
        }
    }

    // NOTE(review): overriding start() to call doWork() directly breaks the
    // Thread contract — no new thread is ever started; doWork() runs
    // synchronously on the caller's thread. If a background thread was
    // intended, this should override run() instead.
    @Override
    public synchronized void start() {
        doWork();
    }

    // NOTE(review): this interrupts the CALLING thread (main, in the usage
    // above), not the consumer — and since start() never spawned a thread,
    // there is nothing to stop. Verify intent before reusing.
    private void shutdown(){
        Thread.currentThread().interrupt();
    }
}
|
||||
|
|
@ -1,30 +0,0 @@
|
|||
package org.dromara.kafka.consumer.filters;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import javax.servlet.*;
|
||||
import javax.servlet.annotation.WebFilter;
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2021-09-08 15:40
|
||||
*/
|
||||
@WebFilter(filterName="MyFilter",urlPatterns = "/*")
|
||||
public class MyFilter implements Filter {
|
||||
|
||||
private Logger logger = LoggerFactory.getLogger(MyFilter.class);
|
||||
|
||||
@Override
|
||||
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain) throws IOException, ServletException {
|
||||
HttpServletRequest request = (HttpServletRequest) servletRequest;
|
||||
String queryString = request.getQueryString();
|
||||
// logger.error("pre,queryString={}",queryString);
|
||||
filterChain.doFilter(servletRequest,servletResponse);
|
||||
// logger.error("queryString={}",queryString);
|
||||
}
|
||||
}
|
||||
|
|
@ -4,6 +4,7 @@ import cn.hutool.core.bean.BeanUtil;
|
|||
import cn.hutool.core.bean.copier.CopyOptions;
|
||||
import cn.hutool.core.convert.ConvertException;
|
||||
import cn.hutool.core.date.DateTime;
|
||||
import cn.hutool.core.date.DateUnit;
|
||||
import cn.hutool.core.date.DateUtil;
|
||||
import cn.hutool.json.JSONObject;
|
||||
import cn.hutool.json.JSONUtil;
|
||||
|
|
@ -27,7 +28,8 @@ import java.util.Objects;
|
|||
import java.util.concurrent.LinkedBlockingDeque;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
* <p>description: 处理kafka数据并发送到data2es
|
||||
* </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2021-09-06 16:44
|
||||
|
|
@ -36,78 +38,20 @@ public class ConsumerWorker implements Runnable {
|
|||
private ConsumerRecord<String, Object> record;
|
||||
private Logger logger = LoggerFactory.getLogger(ConsumerWorker.class);
|
||||
|
||||
public static LinkedBlockingDeque linkedBlockingDeque = new LinkedBlockingDeque<>(5000);
|
||||
|
||||
private String cityCode ;
|
||||
public static LinkedBlockingDeque linkedBlockingDeque = new LinkedBlockingDeque<>(1000);
|
||||
|
||||
ConsumerWorker(ConsumerRecord<String, Object> record, String cityCode) {
|
||||
|
||||
ConsumerWorker(ConsumerRecord<String, Object> record) {
|
||||
this.record = record;
|
||||
this.cityCode = cityCode;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
//其他地市使用的方法,这里使用了一个巧妙的方法,我们开发的地市都是传4位,这种其他地市的cityCode传大于4位,然后截取
|
||||
if(cityCode.length() > 4){
|
||||
cityCode = cityCode.substring(0,4);
|
||||
normalRequest();
|
||||
}else {
|
||||
//六安、安庆等地市的方法,这些地市都是我们自己公司开发的东西。
|
||||
luanrequest();
|
||||
// luanrequestBatch();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* 废弃方法
|
||||
* */
|
||||
private void luanrequestBatch() {
|
||||
Object value = record.value();
|
||||
String topic = record.topic();
|
||||
List<EsGpsInfo> list = new ArrayList<>();
|
||||
logger.info("offset={},topic={},value={}", record.offset(), topic,value);
|
||||
List<JSONObject> jsonObjects = JSON.parseArray((String) value, JSONObject.class);
|
||||
for (JSONObject jsonObject : jsonObjects) {
|
||||
EsGpsInfo esGpsInfo;
|
||||
/*try {
|
||||
jsonObject = JSONUtil.parseObj(((String) value));
|
||||
}catch (ConvertException e){
|
||||
logger.info("jsonObject=null:error={}",e.getMessage());
|
||||
return;
|
||||
}*/
|
||||
try {
|
||||
esGpsInfo = JSONUtil.toBean(jsonObject, EsGpsInfo.class);
|
||||
}catch (ConvertException e){
|
||||
logger.info("EsGpsInfo=null:error={}",e.getMessage());
|
||||
return;
|
||||
}
|
||||
|
||||
if(Objects.isNull(esGpsInfo)){
|
||||
logger.info("esGpsInfo=null no error");
|
||||
return;
|
||||
}
|
||||
String deviceCode = esGpsInfo.getDeviceCode();
|
||||
if(StringUtils.isEmpty(deviceCode) || deviceCode.length() > 100){
|
||||
logger.info("deviceCode:{} is null or is too long ",deviceCode);
|
||||
return;
|
||||
}
|
||||
String latitude = esGpsInfo.getLat();
|
||||
if(StringUtils.isEmpty(latitude) || "0.0".equals(latitude)){
|
||||
logger.info("latitude:{} is null or is zero ",latitude);
|
||||
return;
|
||||
}
|
||||
String longitude = esGpsInfo.getLng();
|
||||
if(StringUtils.isEmpty(longitude) || "0.0".equals(longitude)){
|
||||
logger.info("longitude:{} is null or is zero ",longitude);
|
||||
return;
|
||||
}
|
||||
esGpsInfo.setInfoSource(cityCode);
|
||||
|
||||
esGpsInfo.setGpsTime(new Date(Long.valueOf(jsonObject.getStr("gpsTime"))));
|
||||
list.add(esGpsInfo);
|
||||
}
|
||||
// dataToEsService.saveGpsInfoBatch(list);
|
||||
}
|
||||
|
||||
private void luanrequest() {
|
||||
Object value = record.value();
|
||||
|
|
@ -119,41 +63,49 @@ public class ConsumerWorker implements Runnable {
|
|||
try {
|
||||
jsonObject = JSONUtil.parseObj(((String) value));
|
||||
}catch (ConvertException e){
|
||||
logger.info("jsonObject=null:error={}",e.getMessage());
|
||||
logger.error("jsonObject=null:error={}",e.getMessage());
|
||||
return;
|
||||
}
|
||||
try {
|
||||
esGpsInfo = JSONUtil.toBean(jsonObject, RemoteGpsInfo.class);
|
||||
}catch (ConvertException e){
|
||||
logger.info("EsGpsInfo=null:error={}",e.getMessage());
|
||||
logger.error("EsGpsInfo=null:error={}",e.getMessage());
|
||||
return;
|
||||
}
|
||||
|
||||
if(Objects.isNull(esGpsInfo)){
|
||||
logger.info("esGpsInfo=null no error");
|
||||
logger.error("esGpsInfo=null no error");
|
||||
return;
|
||||
}
|
||||
String deviceCode = esGpsInfo.getDeviceCode();
|
||||
if(StringUtils.isEmpty(deviceCode) || deviceCode.length() > 100){
|
||||
logger.info("deviceCode:{} is null or is too long ",deviceCode);
|
||||
logger.error("deviceCode:{} is null or is too long ",deviceCode);
|
||||
return;
|
||||
}
|
||||
String latitude = esGpsInfo.getLat();
|
||||
if(StringUtils.isEmpty(latitude) || "0.0".equals(latitude)){
|
||||
logger.info("latitude:{} is null or is zero ",latitude);
|
||||
logger.error("latitude:{} is null or is zero ",latitude);
|
||||
return;
|
||||
}
|
||||
String longitude = esGpsInfo.getLng();
|
||||
if(StringUtils.isEmpty(longitude) || "0.0".equals(longitude)){
|
||||
logger.info("longitude:{} is null or is zero ",longitude);
|
||||
logger.error("longitude:{} is null or is zero ",longitude);
|
||||
return;
|
||||
}
|
||||
esGpsInfo.setInfoSource(cityCode);
|
||||
String infoSource = esGpsInfo.getInfoSource();
|
||||
if(StringUtils.isEmpty(infoSource) ){
|
||||
logger.error("infoSource:{} is null ",infoSource);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
esGpsInfo.setGpsTime(new Date(Long.valueOf(jsonObject.getStr("gpsTime"))));
|
||||
}catch (Exception e){
|
||||
logger.error("error_msg={}",e.getMessage());
|
||||
}
|
||||
if(DateUtil.between(esGpsInfo.getGpsTime(), new Date(), DateUnit.SECOND) > 2000L){
|
||||
return;
|
||||
}
|
||||
logger.info("esGpsInfo={}",esGpsInfo);
|
||||
boolean offer = linkedBlockingDeque.offer(esGpsInfo);
|
||||
R response = R.ok(offer);
|
||||
|
|
@ -169,66 +121,4 @@ public class ConsumerWorker implements Runnable {
|
|||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* 通用的请求(一般地市采用这个方法)
|
||||
*/
|
||||
private void normalRequest() {
|
||||
Object value = record.value();
|
||||
String topic = record.topic();
|
||||
|
||||
logger.info("offset={},topic={},value={}", record.offset(), topic,value);
|
||||
|
||||
RemoteGpsInfo esGpsInfo = new RemoteGpsInfo();
|
||||
EsGpsInfoVO esGpsInfoVO;
|
||||
try {
|
||||
esGpsInfoVO = JSONUtil.toBean(((String) value), EsGpsInfoVO.class);
|
||||
}catch (ConvertException e){
|
||||
logger.info("esGpsInfoVO=null:error={}",e.getMessage());
|
||||
return;
|
||||
}
|
||||
if(Objects.isNull(esGpsInfoVO)){
|
||||
logger.info("esGpsInfoVO=null no error");
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
try {
|
||||
DateTime parse = DateUtil.parse(esGpsInfoVO.getGpsTime(), "yyyy-MM-dd HH:mm:ss");
|
||||
}catch (Exception e){
|
||||
logger.info("gpsTime:{} format error", esGpsInfoVO.getGpsTime());
|
||||
return;
|
||||
}
|
||||
|
||||
String deviceCode = esGpsInfoVO.getDeviceCode();
|
||||
if(StringUtils.isEmpty(deviceCode) || deviceCode.length() > 100){
|
||||
logger.info("deviceCode:{} is null or is too long ",deviceCode);
|
||||
return;
|
||||
}
|
||||
String latitude = esGpsInfoVO.getLatitude();
|
||||
if(StringUtils.isEmpty(latitude) || "0.0".equals(latitude)){
|
||||
logger.info("latitude:{} is null or is zero ",latitude);
|
||||
return;
|
||||
}
|
||||
String longitude = esGpsInfoVO.getLongitude();
|
||||
if(StringUtils.isEmpty(longitude) || "0.0".equals(longitude)){
|
||||
logger.info("longitude:{} is null or is zero ",longitude);
|
||||
return;
|
||||
}
|
||||
BeanUtil.copyProperties(esGpsInfoVO,esGpsInfo,new CopyOptions());
|
||||
esGpsInfo.setLat(latitude);
|
||||
esGpsInfo.setLng(esGpsInfoVO.getLongitude());
|
||||
esGpsInfo.setOrientation(esGpsInfoVO.getDirection());
|
||||
esGpsInfo.setInfoSource(cityCode);
|
||||
|
||||
boolean offer = linkedBlockingDeque.offer(esGpsInfo);
|
||||
R response = R.ok(offer);
|
||||
if(200 == response.getCode()){
|
||||
logger.info("topic={},data2es={}",topic,"success");
|
||||
}else{
|
||||
logger.error("topic={},data2es={}",topic,"fail");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,22 +1,16 @@
|
|||
package org.dromara.kafka.consumer.handler;
|
||||
|
||||
import org.apache.dubbo.config.annotation.DubboReference;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.common.PartitionInfo;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.dromara.data2es.api.RemoteDataToEsService;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
|
||||
import org.springframework.kafka.config.KafkaListenerContainerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.listener.ContainerProperties;
|
||||
import org.springframework.kafka.listener.MessageListener;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
|
|
@ -29,68 +23,39 @@ import java.util.concurrent.ThreadPoolExecutor;
|
|||
*/
|
||||
public class KafkaConsumerRunnable implements Runnable {
|
||||
|
||||
private Map props;
|
||||
private final KafkaConsumer<String, Object> consumer;
|
||||
private ThreadPoolExecutor taskExecutor;
|
||||
|
||||
private String cityCode;
|
||||
private Logger logger = LoggerFactory.getLogger(KafkaConsumerRunnable.class);
|
||||
|
||||
public KafkaConsumerRunnable(Map props, ThreadPoolExecutor taskExecutor,
|
||||
String cityCode) {
|
||||
this.props = props;
|
||||
public KafkaConsumerRunnable(KafkaConsumer<String, Object> consumer, ThreadPoolExecutor taskExecutor) {
|
||||
this.consumer = consumer;
|
||||
this.taskExecutor = taskExecutor;
|
||||
this.cityCode = cityCode;
|
||||
}
|
||||
|
||||
private DefaultKafkaConsumerFactory buildConsumerFactory(){
|
||||
return new DefaultKafkaConsumerFactory<String, String>(props);
|
||||
}
|
||||
|
||||
private ContainerProperties containerProperties(String[] topic, MessageListener<String, Object> messageListener) {
|
||||
ContainerProperties containerProperties = new ContainerProperties(topic);
|
||||
containerProperties.setMessageListener(messageListener);
|
||||
return containerProperties;
|
||||
}
|
||||
|
||||
private KafkaListenerContainerFactory buildListenerFactory(){
|
||||
ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory();
|
||||
factory.setConsumerFactory(buildConsumerFactory());
|
||||
factory.setConcurrency(4);
|
||||
factory.setBatchListener(true);
|
||||
|
||||
factory.getContainerProperties().setPollTimeout(3000);
|
||||
return factory;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
KafkaConsumer<String,Object> consumer = new KafkaConsumer<>(props);
|
||||
|
||||
List topics = (List) props.get("topics");
|
||||
consumer.subscribe(topics);
|
||||
consumer.subscribe(Collections.singletonList("jysb_dwxx"));
|
||||
consumer.poll(0); // 令订阅生效
|
||||
|
||||
List<TopicPartition> topicPartitions = new ArrayList<>();
|
||||
Map<String, List<PartitionInfo>> stringListMap = consumer.listTopics();
|
||||
for (Object topic : topics) {
|
||||
String topic1 = (String) topic;
|
||||
String topic1 ="jysb_dwxx";
|
||||
List<PartitionInfo> partitionInfos = stringListMap.get(topic1);
|
||||
for (PartitionInfo partitionInfo : partitionInfos) {
|
||||
TopicPartition partition = new TopicPartition(topic1, partitionInfo.partition());
|
||||
topicPartitions.add(partition);
|
||||
}
|
||||
}
|
||||
consumer.seekToEnd(topicPartitions); // 如果传Collections.emptyList()表示移动所有订阅topic分区offset到最末端
|
||||
|
||||
while (true) {
|
||||
ConsumerRecords<String, Object> records = consumer.poll(Duration.ofMillis(100));
|
||||
for (ConsumerRecord<String, Object> record : records) {
|
||||
taskExecutor.submit(new ConsumerWorker(record, cityCode));
|
||||
logger.info("[Consumer], Received message: (" + record.key() + ", " + record.value()
|
||||
+ ") at offset " + record.offset());
|
||||
taskExecutor.submit(new ConsumerWorker(record));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,108 +0,0 @@
|
|||
package org.dromara.kafka.consumer.handler;
|
||||
|
||||
import cn.hutool.core.date.DateTime;
|
||||
import cn.hutool.core.date.DateUtil;
|
||||
import org.dromara.kafka.consumer.entity.EsGpsInfo;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.Properties;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2021-10-28 14:48
|
||||
*/
|
||||
public class KafkaSecurityUtil {
|
||||
|
||||
|
||||
|
||||
|
||||
static Logger logger = LoggerFactory.getLogger(KafkaSecurityUtil.class);
|
||||
|
||||
public static void main(String[] args) {
|
||||
EsGpsInfo esGpsInfo = new EsGpsInfo();
|
||||
String realtime = "2021/11/04 12:00:11";
|
||||
DateTime dateTime = DateUtil.parse(realtime);
|
||||
esGpsInfo.setGpsTime(dateTime.toJdkDate());
|
||||
logger.info("esGpsInfo:{},deviceType={},gpsTime={}",esGpsInfo.toString(),
|
||||
esGpsInfo.getDeviceType(),dateTime.toJdkDate().toString());
|
||||
}
|
||||
/**
|
||||
* 用户自己申请的机机账号keytab文件名称
|
||||
*/
|
||||
private static final String USER_KEYTAB_FILE = "user.keytab";
|
||||
|
||||
/**
|
||||
* 用户自己申请的机机账号名称
|
||||
*/
|
||||
private static final String USER_PRINCIPAL = "aqdsj_ruansi@HADOOP.COM";
|
||||
|
||||
public static void securityPrepare() throws IOException
|
||||
{
|
||||
logger.error("进入了---securityPrepare");
|
||||
//String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
|
||||
//String krbFile = filePath + "krb5.conf";
|
||||
//ClassPathResource classPathResource = new ClassPathResource("krb5.conf");
|
||||
//String krbFile = classPathResource.getAbsolutePath();
|
||||
String krbFile = "/gpsstore/krb5.conf";
|
||||
// String userKeyTableFile = filePath + USER_KEYTAB_FILE;
|
||||
//ClassPathResource classPathResource1 = new ClassPathResource(USER_KEYTAB_FILE);
|
||||
String userKeyTableFile = "/gpsstore/user.keytab";
|
||||
|
||||
//windows路径下分隔符替换
|
||||
userKeyTableFile = userKeyTableFile.replace("\\", "\\\\");
|
||||
krbFile = krbFile.replace("\\", "\\\\");
|
||||
|
||||
LoginUtil.setKrb5Config(krbFile);
|
||||
LoginUtil.setZookeeperServerPrincipal("zookeeper/hadoop.hadoop.com");
|
||||
logger.error("userKeyTableFile路径---{}",userKeyTableFile);
|
||||
LoginUtil.setJaasFile(USER_PRINCIPAL, userKeyTableFile);
|
||||
}
|
||||
|
||||
public static Boolean isSecurityModel()
|
||||
{
|
||||
Boolean isSecurity = false;
|
||||
//String krbFilePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator + "kafkaSecurityMode";
|
||||
//ClassPathResource classPathResource = new ClassPathResource("kafkaSecurityMode");
|
||||
InputStream inputStream = Thread.currentThread().getContextClassLoader().getResourceAsStream("kafkaSecurityMode");
|
||||
|
||||
/*File file = classPathResource.getFile();
|
||||
|
||||
if(!file.exists()){
|
||||
return isSecurity;
|
||||
}*/
|
||||
|
||||
Properties securityProps = new Properties();
|
||||
|
||||
|
||||
try
|
||||
{
|
||||
securityProps.load(inputStream);
|
||||
if ("yes".equalsIgnoreCase(securityProps.getProperty("kafka.client.security.mode")))
|
||||
{
|
||||
isSecurity = true;
|
||||
}
|
||||
}
|
||||
catch (Exception e)
|
||||
{
|
||||
logger.info("The Exception occured : {}.", e);
|
||||
}
|
||||
|
||||
return isSecurity;
|
||||
}
|
||||
|
||||
/*
|
||||
* 判断文件是否存在
|
||||
*/
|
||||
private static boolean isFileExists(String fileName)
|
||||
{
|
||||
File file = new File(fileName);
|
||||
|
||||
return file.exists();
|
||||
}
|
||||
}
|
||||
|
|
@ -1,9 +1,14 @@
|
|||
package org.dromara.kafka.consumer.handler;
|
||||
|
||||
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.*;
|
||||
import org.apache.kafka.common.KafkaException;
|
||||
import org.apache.kafka.common.errors.AuthorizationException;
|
||||
import org.apache.kafka.common.errors.RecordDeserializationException;
|
||||
import org.apache.kafka.common.errors.UnsupportedVersionException;
|
||||
import org.apache.kafka.common.serialization.StringDeserializer;
|
||||
import org.dromara.kafka.consumer.config.KafkaPropertiesConfig;
|
||||
import org.dromara.kafka.consumer.config.KafkaProperties;
|
||||
import org.dromara.kafka.consumer.config.LoginUtil;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
|
|
@ -12,10 +17,11 @@ import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
|
|||
import org.springframework.stereotype.Component;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
|
||||
import javax.annotation.Resource;
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.time.Duration;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
|
|
@ -29,101 +35,155 @@ import java.util.concurrent.ThreadPoolExecutor;
|
|||
@Component
|
||||
public class RealConsumer implements CommandLineRunner {
|
||||
|
||||
private String kafkaServers;
|
||||
private Logger logger = LoggerFactory.getLogger(RealConsumer.class);
|
||||
|
||||
private String groupId;
|
||||
private final KafkaConsumer<String, Object> consumer;
|
||||
|
||||
private String topics;
|
||||
|
||||
private String cityCode = "3400";
|
||||
|
||||
|
||||
|
||||
@Autowired
|
||||
KafkaPropertiesConfig kafkaPropertiesConfig;
|
||||
|
||||
@Autowired
|
||||
@Resource
|
||||
ThreadPoolExecutor dtpExecutor2;
|
||||
|
||||
|
||||
private Logger logger = LoggerFactory.getLogger(RealConsumer.class);
|
||||
private volatile boolean closed;
|
||||
|
||||
@Override
|
||||
public void run(String... args) throws Exception {
|
||||
kafkaServers = "127.0.0.1:9092";
|
||||
topics = "topic.send.2,topic.send.3,topic.send.4,topic.send.5,topic.send.8";
|
||||
groupId = "group_ruansi_xuancheng";
|
||||
cityCode = "3418";
|
||||
if(args.length > 0){
|
||||
/*kafkaServers = args[0];
|
||||
topics = args[1];
|
||||
groupId = args[2];
|
||||
cityCode = args[3];*/
|
||||
|
||||
// 一次请求的最大等待时间(S)
|
||||
private final int waitTime = 1;
|
||||
|
||||
// Broker连接地址
|
||||
private final static String BOOTSTRAP_SERVER = "bootstrap.servers";
|
||||
|
||||
// Group id
|
||||
private final static String GROUP_ID = "group.id";
|
||||
|
||||
// 消息内容使用的反序列化类
|
||||
private final static String VALUE_DESERIALIZER = "value.deserializer";
|
||||
|
||||
// 消息Key值使用的反序列化类
|
||||
private final static String KEY_DESERIALIZER = "key.deserializer";
|
||||
|
||||
// 协议类型:当前支持配置为SASL_PLAINTEXT或者PLAINTEXT
|
||||
private final static String SECURITY_PROTOCOL = "security.protocol";
|
||||
|
||||
// 服务名
|
||||
private final static String SASL_KERBEROS_SERVICE_NAME = "sasl.kerberos.service.name";
|
||||
|
||||
// 域名
|
||||
private final static String KERBEROS_DOMAIN_NAME = "kerberos.domain.name";
|
||||
|
||||
// 是否自动提交offset
|
||||
private final static String ENABLE_AUTO_COMMIT = "enable.auto.commit";
|
||||
|
||||
// 自动提交offset的时间间隔
|
||||
private final static String AUTO_COMMIT_INTERVAL_MS = "auto.commit.interval.ms";
|
||||
|
||||
// 会话超时时间
|
||||
private final static String SESSION_TIMEOUT_MS = "session.timeout.ms";
|
||||
|
||||
/**
|
||||
* 用户自己申请的机机账号keytab文件名称
|
||||
*/
|
||||
private static final String USER_KEYTAB_FILE = "user.keytab";
|
||||
|
||||
/**
|
||||
* 用户自己申请的机机账号名称
|
||||
*/
|
||||
private static final String USER_PRINCIPAL = "yhy_ahrs_rcw";
|
||||
|
||||
/**
|
||||
* Consumer构造函数
|
||||
*
|
||||
* @param
|
||||
*/
|
||||
public RealConsumer() {
|
||||
initSecurity();
|
||||
Properties props = initProperties();
|
||||
consumer = new KafkaConsumer<String, Object>(props);
|
||||
// 订阅
|
||||
// consumer.subscribe(Collections.singletonList("jysb_dwxx"));
|
||||
}
|
||||
ExecutorService executorService = Executors.newSingleThreadExecutor();
|
||||
Map kafkaProp = getKafkaProp();
|
||||
|
||||
public static Properties initProperties() {
|
||||
Properties props = new Properties();
|
||||
KafkaProperties kafkaProc = KafkaProperties.getInstance();
|
||||
|
||||
if (KafkaSecurityUtil.isSecurityModel())
|
||||
{
|
||||
try
|
||||
// Broker连接地址
|
||||
props.put(BOOTSTRAP_SERVER, kafkaProc.getValues(BOOTSTRAP_SERVER, "localhost:21007"));
|
||||
// Group id
|
||||
props.put(GROUP_ID, kafkaProc.getValues(GROUP_ID, "DemoConsumer"));
|
||||
// 是否自动提交offset
|
||||
props.put(ENABLE_AUTO_COMMIT, kafkaProc.getValues(ENABLE_AUTO_COMMIT, "true"));
|
||||
// 自动提交offset的时间间隔
|
||||
props.put(AUTO_COMMIT_INTERVAL_MS, kafkaProc.getValues(AUTO_COMMIT_INTERVAL_MS,"1000"));
|
||||
// 会话超时时间
|
||||
props.put(SESSION_TIMEOUT_MS, kafkaProc.getValues(SESSION_TIMEOUT_MS, "30000"));
|
||||
// 消息Key值使用的反序列化类
|
||||
props.put(KEY_DESERIALIZER,
|
||||
kafkaProc.getValues(KEY_DESERIALIZER, "org.apache.kafka.common.serialization.StringDeserializer"));
|
||||
// 消息内容使用的反序列化类
|
||||
props.put(VALUE_DESERIALIZER,
|
||||
kafkaProc.getValues(VALUE_DESERIALIZER, "org.apache.kafka.common.serialization.StringDeserializer"));
|
||||
// 安全协议类型
|
||||
props.put(SECURITY_PROTOCOL, kafkaProc.getValues(SECURITY_PROTOCOL, "SASL_PLAINTEXT"));
|
||||
// 服务名
|
||||
props.put(SASL_KERBEROS_SERVICE_NAME, "kafka");
|
||||
// 域名
|
||||
props.put(KERBEROS_DOMAIN_NAME, kafkaProc.getValues(KERBEROS_DOMAIN_NAME, "hadoop.hadoop.com"));
|
||||
|
||||
return props;
|
||||
}
|
||||
|
||||
/**
|
||||
* 初始化安全认证
|
||||
*/
|
||||
public void initSecurity() {
|
||||
if (LoginUtil.isSecurityModel())
|
||||
{
|
||||
try {
|
||||
logger.info("Securitymode start.");
|
||||
|
||||
//!!注意,安全认证时,需要用户手动修改为自己申请的机机账号
|
||||
//认证方式 SASL_PLAINTEXT 或者 PLAINTEXT
|
||||
kafkaProp.put("security.protocol","SASL_PLAINTEXT");
|
||||
//服务名
|
||||
kafkaProp.put("sasl.kerberos.service.name","kafka");
|
||||
//域名
|
||||
kafkaProp.put("kerberos.domain.name","hadoop.hadoop.com");
|
||||
KafkaSecurityUtil.securityPrepare();
|
||||
}
|
||||
catch (IOException e)
|
||||
{
|
||||
// !!注意,安全认证时,需要用户手动修改为自己申请的机机账号
|
||||
LoginUtil.securityPrepare(USER_PRINCIPAL, USER_KEYTAB_FILE);
|
||||
} catch (IOException e) {
|
||||
logger.error("Security prepare failure.");
|
||||
logger.error("The IOException occured.", e);
|
||||
return;
|
||||
}
|
||||
logger.info("Security prepare success.");
|
||||
}
|
||||
|
||||
KafkaConsumerRunnable runnable = new KafkaConsumerRunnable(kafkaProp,dtpExecutor2,cityCode);
|
||||
executorService.execute(runnable);
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* 获取kafka配置
|
||||
* @return
|
||||
* 订阅Topic的消息处理函数
|
||||
*/
|
||||
private Map<String, Object> getKafkaProp() {
|
||||
// Properties map = new Properties();
|
||||
Map<String, Object> map = new HashMap<>();
|
||||
map.put("bootstrap.servers",kafkaServers);
|
||||
map.put("group.id",groupId);
|
||||
map.put("enable.auto.commit", "true");
|
||||
map.put("auto.commit.interval.ms", "1000");
|
||||
map.put("session.timeout.ms", "30000");
|
||||
map.put("key.deserializer", StringDeserializer.class);
|
||||
map.put("value.deserializer", StringDeserializer.class);
|
||||
map.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG,5);
|
||||
// map.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG,1000 * 5);
|
||||
// map.put("ack.mode", "manual_immediate");
|
||||
public void run(String... args) throws Exception{
|
||||
try {
|
||||
logger.info("进入消费");
|
||||
ExecutorService executorService = Executors.newSingleThreadExecutor();
|
||||
// realConsumer.run();
|
||||
KafkaConsumerRunnable runnable = new KafkaConsumerRunnable(consumer,dtpExecutor2);
|
||||
executorService.execute(runnable);
|
||||
// 消息消费请求
|
||||
/* ConsumerRecords<String, Object> records = consumer.poll(Duration.ofSeconds(waitTime));
|
||||
// 消息处理
|
||||
for (ConsumerRecord<String, Object> record : records) {
|
||||
logger.info("[Consumer], Received message: (" + record.key() + ", " + record.value()
|
||||
+ ") at offset " + record.offset());
|
||||
dtpExecutor2.submit(new ConsumerWorker(record));
|
||||
|
||||
// //认证方式 SASL_PLAINTEXT 或者 PLAINTEXT
|
||||
// map.put("security.protocol","SASL_PLAINTEXT");
|
||||
// //服务名
|
||||
// map.put("sasl.kerberos.service.name","kafka");
|
||||
// //域名
|
||||
// map.put("kerberos.domain.name","hadoop.hadoop.com");
|
||||
String[] split = topics.split(",");
|
||||
List list = CollectionUtils.arrayToList(split);
|
||||
map.put("topics", list);
|
||||
return map;
|
||||
}*/
|
||||
} catch (AuthorizationException | UnsupportedVersionException
|
||||
| RecordDeserializationException e) {
|
||||
logger.error(e.getMessage());
|
||||
// 无法从异常中恢复
|
||||
} catch (OffsetOutOfRangeException | NoOffsetForPartitionException e) {
|
||||
logger.error("Invalid or no offset found, using latest");
|
||||
consumer.seekToEnd(e.partitions());
|
||||
consumer.commitSync();
|
||||
} catch (KafkaException e) {
|
||||
logger.error(e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -7,8 +7,6 @@ import org.springframework.context.annotation.Configuration;
|
|||
import org.springframework.web.servlet.HandlerInterceptor;
|
||||
import org.springframework.web.servlet.ModelAndView;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.servlet.http.HttpServletResponse;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
|
|
|
|||
|
|
@ -1,28 +1,49 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<configuration scan="true" scanPeriod="60 seconds" debug="false">
|
||||
<!-- 日志存放路径 -->
|
||||
<property name="log.path" value="logs/${project.artifactId}" />
|
||||
<property name="log.path" value="logs" />
|
||||
<property name="log.file" value="consumer" />
|
||||
<property name="MAX_FILE_SIZE" value="50MB" />
|
||||
<property name="MAX_HISTORY" value="30" />
|
||||
<!-- 日志输出格式 -->
|
||||
<property name="console.log.pattern"
|
||||
value="%red(%d{yyyy-MM-dd HH:mm:ss}) %green([%thread]) %highlight(%-5level) %boldMagenta(%logger{36}%n) - %msg%n"/>
|
||||
|
||||
<!-- 控制台输出 -->
|
||||
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<!-- INFO日志Appender -->
|
||||
<appender name="FILE_INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.path}/info.${log.file}.log</file>
|
||||
<filter class="ch.qos.logback.classic.filter.LevelFilter">
|
||||
<level>INFO</level>
|
||||
<onMatch>ACCEPT</onMatch>
|
||||
<onMismatch>DENY</onMismatch>
|
||||
</filter>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.path}/info/info.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
|
||||
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
|
||||
<maxHistory>${MAX_HISTORY}</maxHistory>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>${console.log.pattern}</pattern>
|
||||
<charset>utf-8</charset>
|
||||
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<include resource="logback-common.xml" />
|
||||
<!-- ERROR日志Appender -->
|
||||
<appender name="FILE_ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.path}/error.${log.file}.log</file>
|
||||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||
<level>ERROR</level>
|
||||
</filter>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.path}/error/error.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
|
||||
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
|
||||
<maxHistory>${MAX_HISTORY}</maxHistory>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<include resource="logback-logstash.xml" />
|
||||
|
||||
<!-- 开启 skywalking 日志收集 -->
|
||||
<include resource="logback-skylog.xml" />
|
||||
|
||||
<!--系统操作日志-->
|
||||
<root level="info">
|
||||
<appender-ref ref="console" />
|
||||
<!-- 根Logger配置(禁用控制台输出) -->
|
||||
<root level="INFO">
|
||||
<appender-ref ref="FILE_INFO" />
|
||||
<appender-ref ref="FILE_ERROR" />
|
||||
</root>
|
||||
|
||||
</configuration>
|
||||
|
|
|
|||
|
|
@ -0,0 +1,103 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<parent>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-modules</artifactId>
|
||||
<version>${revision}</version>
|
||||
</parent>
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<artifactId>stwzhj-data2StKafka</artifactId>
|
||||
|
||||
<description>
|
||||
stwzhj-data2StKafka 消费地市kafka发送到省厅kafka
|
||||
</description>
|
||||
|
||||
<dependencies>
|
||||
|
||||
<dependency>
|
||||
<groupId>com.alibaba</groupId>
|
||||
<artifactId>fastjson</artifactId>
|
||||
</dependency>
|
||||
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.commons</groupId>
|
||||
<artifactId>commons-lang3</artifactId>
|
||||
</dependency>
|
||||
|
||||
|
||||
|
||||
<!--动态线程池-->
|
||||
<dependency>
|
||||
<groupId>cn.dynamictp</groupId>
|
||||
<artifactId>dynamic-tp-spring-boot-starter-common</artifactId>
|
||||
<version>1.1.0</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.12</artifactId>
|
||||
<version>3.6.1-h0.cbu.mrs.350.r11</version>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>org.apache.zookeeper</groupId>
|
||||
<artifactId>zookeeper</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>net.sf.jopt-simple</groupId>
|
||||
<artifactId>jopt-simple</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>com.huawei.mrs</groupId>
|
||||
<artifactId>manager-wc2frm</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.xerial.snappy</groupId>
|
||||
<artifactId>snappy-java</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>com.huawei.mrs</groupId>
|
||||
<artifactId>om-controller-api</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>com.101tec</groupId>
|
||||
<artifactId>zkclient</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
<version>3.6.1-h0.cbu.mrs.350.r11</version>
|
||||
</dependency>
|
||||
|
||||
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
<finalName>${project.artifactId}</finalName>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-maven-plugin</artifactId>
|
||||
<version>${spring-boot.version}</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<goals>
|
||||
<goal>repackage</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
</project>
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
package org.dromara.data2kafka;
|
||||
|
||||
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||
import org.springframework.boot.autoconfigure.data.redis.RedisAutoConfiguration;
|
||||
import org.springframework.boot.autoconfigure.data.redis.RedisRepositoriesAutoConfiguration;
|
||||
import org.springframework.boot.context.metrics.buffering.BufferingApplicationStartup;
|
||||
import org.springframework.scheduling.annotation.EnableScheduling;
|
||||
|
||||
@EnableScheduling
|
||||
@SpringBootApplication
|
||||
public class Data2KafkaApplication {
|
||||
|
||||
public static void main(String[] args) {
|
||||
SpringApplication application = new SpringApplication(Data2KafkaApplication.class);
|
||||
application.setApplicationStartup(new BufferingApplicationStartup(2048));
|
||||
application.run(args);
|
||||
System.out.println("(♥◠‿◠)ノ゙ 消费数据发送至省厅启动成功 ლ(´ڡ`ლ)゙ ");
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,69 @@
|
|||
package org.dromara.data2kafka.config;
|
||||
|
||||
import com.dtp.common.em.QueueTypeEnum;
|
||||
import com.dtp.common.em.RejectedTypeEnum;
|
||||
import com.dtp.core.support.ThreadPoolBuilder;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
|
||||
|
||||
import java.util.concurrent.LinkedBlockingQueue;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2021-09-06 16:31
|
||||
*/
|
||||
@Configuration
|
||||
public class AsyncConfig {
|
||||
|
||||
@Bean("taskExecutor")
|
||||
public ThreadPoolTaskExecutor taskExecutor(){
|
||||
ThreadPoolTaskExecutor taskExecutor = new ThreadPoolTaskExecutor();
|
||||
taskExecutor.setCorePoolSize(8);
|
||||
taskExecutor.setMaxPoolSize(20);
|
||||
taskExecutor.setQueueCapacity(200);
|
||||
taskExecutor.setKeepAliveSeconds(60);
|
||||
taskExecutor.setThreadNamePrefix("hfapp--kafkaConsumer--");
|
||||
taskExecutor.setWaitForTasksToCompleteOnShutdown(true);
|
||||
taskExecutor.setAwaitTerminationSeconds(60);
|
||||
taskExecutor.setRejectedExecutionHandler(new ThreadPoolExecutor.DiscardOldestPolicy());
|
||||
return taskExecutor;
|
||||
}
|
||||
|
||||
/**
|
||||
* tips: 建议直接在配置中心配置就行,不用 @Bean 声明
|
||||
* @return 线程池实例
|
||||
*/
|
||||
// @Bean(name = "dtpExecutor2")
|
||||
public ThreadPoolExecutor dtpExecutor2() {
|
||||
return ThreadPoolBuilder.newBuilder()
|
||||
.threadPoolName("dtpExecutor2")
|
||||
.corePoolSize(8)
|
||||
.maximumPoolSize(20)
|
||||
.keepAliveTime(60)
|
||||
.timeUnit(TimeUnit.MILLISECONDS)
|
||||
.workQueue(QueueTypeEnum.VARIABLE_LINKED_BLOCKING_QUEUE.getName(), 1024, false)
|
||||
.waitForTasksToCompleteOnShutdown(true)
|
||||
.awaitTerminationSeconds(60)
|
||||
.rejectedExecutionHandler(RejectedTypeEnum.CALLER_RUNS_POLICY.getName())
|
||||
.buildDynamic();
|
||||
}
|
||||
|
||||
@Bean(name = "threadPoolExecutor")
|
||||
public ThreadPoolExecutor threadPoolExecutor() {
|
||||
return new ThreadPoolExecutor(
|
||||
8, // 核心线程数
|
||||
20, // 最大线程数
|
||||
60, // 空闲时间300秒
|
||||
TimeUnit.SECONDS,
|
||||
new LinkedBlockingQueue<>(10000), // 任务队列最大长度
|
||||
new ThreadPoolExecutor.CallerRunsPolicy() // 拒绝策略:由调用线程处理
|
||||
);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
|
@ -0,0 +1,138 @@
|
|||
package org.dromara.data2kafka.config;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.IOException;
|
||||
import java.util.Properties;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public final class KafkaProperties
|
||||
{
|
||||
private static final Logger LOG = LoggerFactory.getLogger(KafkaProperties.class);
|
||||
|
||||
// Topic名称,安全模式下,需要以管理员用户添加当前用户的访问权限
|
||||
public final static String TOPIC = "jysb_dwxx";
|
||||
|
||||
private static Properties serverProps = new Properties();
|
||||
|
||||
private static Properties producerProps = new Properties();
|
||||
|
||||
private static Properties consumerProps = new Properties();
|
||||
|
||||
private static Properties clientProps = new Properties();
|
||||
|
||||
private static KafkaProperties instance = null;
|
||||
|
||||
private KafkaProperties()
|
||||
{
|
||||
// String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
|
||||
String filePath = "/home/rsoft/config/";
|
||||
LOG.info("路径=={}",filePath);
|
||||
try
|
||||
{
|
||||
File proFile = new File(filePath + "producer.properties");
|
||||
|
||||
if (proFile.exists())
|
||||
{
|
||||
producerProps.load(new FileInputStream(filePath + "producer.properties"));
|
||||
}
|
||||
|
||||
File conFile = new File(filePath + "producer.properties");
|
||||
|
||||
if (conFile.exists())
|
||||
{
|
||||
consumerProps.load(new FileInputStream(filePath + "consumer.properties"));
|
||||
}
|
||||
|
||||
File serFile = new File(filePath + "server.properties");
|
||||
|
||||
if (serFile.exists())
|
||||
{
|
||||
serverProps.load(new FileInputStream(filePath + "server.properties"));
|
||||
}
|
||||
|
||||
File cliFile = new File(filePath + "client.properties");
|
||||
|
||||
if (cliFile.exists())
|
||||
{
|
||||
clientProps.load(new FileInputStream(filePath + "client.properties"));
|
||||
}
|
||||
}
|
||||
catch (IOException e)
|
||||
{
|
||||
LOG.info("The Exception occured.", e);
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized static KafkaProperties getInstance()
|
||||
{
|
||||
if (null == instance)
|
||||
{
|
||||
instance = new KafkaProperties();
|
||||
}
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取参数值
|
||||
* @param key properites的key值
|
||||
* @param defValue 默认值
|
||||
* @return
|
||||
*/
|
||||
public String getValues(String key, String defValue)
|
||||
{
|
||||
String rtValue = null;
|
||||
|
||||
if (null == key)
|
||||
{
|
||||
LOG.error("key is null");
|
||||
}
|
||||
else
|
||||
{
|
||||
rtValue = getPropertiesValue(key);
|
||||
}
|
||||
|
||||
if (null == rtValue)
|
||||
{
|
||||
LOG.warn("KafkaProperties.getValues return null, key is " + key);
|
||||
rtValue = defValue;
|
||||
}
|
||||
|
||||
LOG.info("KafkaProperties.getValues: key is " + key + "; Value is " + rtValue);
|
||||
|
||||
return rtValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* 根据key值获取server.properties的值
|
||||
* @param key
|
||||
* @return
|
||||
*/
|
||||
private String getPropertiesValue(String key)
|
||||
{
|
||||
String rtValue = serverProps.getProperty(key);
|
||||
|
||||
// server.properties中没有,则再向producer.properties中获取
|
||||
if (null == rtValue)
|
||||
{
|
||||
rtValue = producerProps.getProperty(key);
|
||||
}
|
||||
|
||||
// producer中没有,则再向consumer.properties中获取
|
||||
if (null == rtValue)
|
||||
{
|
||||
rtValue = consumerProps.getProperty(key);
|
||||
}
|
||||
|
||||
// consumer没有,则再向client.properties中获取
|
||||
if (null == rtValue)
|
||||
{
|
||||
rtValue = clientProps.getProperty(key);
|
||||
}
|
||||
|
||||
return rtValue;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,259 @@
|
|||
package org.dromara.data2kafka.config;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.util.Properties;
|
||||
|
||||
/**
 * Kerberos/JAAS login helper for the Kafka and ZooKeeper clients.
 * Generates a per-user jaas.conf in the temp directory and wires the
 * krb5/JAAS/ZooKeeper-principal JVM system properties. The order of
 * operations in {@link #securityPrepare} matters: krb5 config first,
 * then the ZooKeeper server principal, then the JAAS file.
 */
public class LoginUtil {
    private static final Logger LOG = LoggerFactory.getLogger(LoginUtil.class);

    /**
     * JAAS login-context names: KAFKA maps to "KafkaClient", ZOOKEEPER to "Client".
     */
    public enum Module {
        KAFKA("KafkaClient"), ZOOKEEPER("Client");

        private String name;

        private Module(String name)
        {
            this.name = name;
        }

        public String getName()
        {
            return name;
        }
    }

    /**
     * line operator string
     */
    private static final String LINE_SEPARATOR = System.getProperty("line.separator");

    /**
     * jaas file postfix
     */
    private static final String JAAS_POSTFIX = ".jaas.conf";

    /**
     * is IBM jdk or not
     */
    private static final boolean IS_IBM_JDK = System.getProperty("java.vendor").contains("IBM");

    /**
     * IBM jdk login module
     */
    private static final String IBM_LOGIN_MODULE = "com.ibm.security.auth.module.Krb5LoginModule required";

    /**
     * oracle jdk login module
     */
    private static final String SUN_LOGIN_MODULE = "com.sun.security.auth.module.Krb5LoginModule required";

    /**
     * Zookeeper quorum principal.
     */
    public static final String ZOOKEEPER_AUTH_PRINCIPAL = "zookeeper.server.principal";

    /**
     * java security krb5 file path
     */
    public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf";

    /**
     * java security login file path
     */
    public static final String JAVA_SECURITY_LOGIN_CONF = "java.security.auth.login.config";

    /**
     * Writes a fresh jaas.conf into the temp directory (named after the current
     * OS user) and points java.security.auth.login.config at it.
     *
     * @param principal  Kerberos principal to authenticate as
     * @param keytabPath path of the keytab file for that principal
     * @throws IOException if the old file cannot be deleted or the new one written
     */
    public static void setJaasFile(String principal, String keytabPath)
        throws IOException {
        String jaasPath =
            new File(System.getProperty("java.io.tmpdir")) + File.separator + System.getProperty("user.name")
                + JAAS_POSTFIX;

        // Escape backslashes so Windows paths survive inside the JAAS file.
        jaasPath = jaasPath.replace("\\", "\\\\");
        // Remove any stale jaas file before regenerating it.
        deleteJaasFile(jaasPath);
        writeJaasFile(jaasPath, principal, keytabPath);
        System.setProperty(JAVA_SECURITY_LOGIN_CONF, jaasPath);
    }

    /**
     * Sets the ZooKeeper server principal system property and verifies the
     * JVM actually took the value.
     *
     * @param zkServerPrincipal e.g. "zookeeper/hadoop.hadoop.com"
     * @throws IOException if the property did not stick or mismatches
     */
    public static void setZookeeperServerPrincipal(String zkServerPrincipal)
        throws IOException {
        System.setProperty(ZOOKEEPER_AUTH_PRINCIPAL, zkServerPrincipal);
        String ret = System.getProperty(ZOOKEEPER_AUTH_PRINCIPAL);
        if (ret == null)
        {
            throw new IOException(ZOOKEEPER_AUTH_PRINCIPAL + " is null.");
        }
        if (!ret.equals(zkServerPrincipal))
        {
            throw new IOException(ZOOKEEPER_AUTH_PRINCIPAL + " is " + ret + " is not " + zkServerPrincipal + ".");
        }
    }

    /**
     * Points java.security.krb5.conf at the given krb5 file and verifies the
     * property was set.
     *
     * @param krb5ConfFile absolute path of krb5.conf
     * @throws IOException if the property did not stick or mismatches
     */
    public static void setKrb5Config(String krb5ConfFile)
        throws IOException {
        System.setProperty(JAVA_SECURITY_KRB5_CONF, krb5ConfFile);
        String ret = System.getProperty(JAVA_SECURITY_KRB5_CONF);
        if (ret == null)
        {
            throw new IOException(JAVA_SECURITY_KRB5_CONF + " is null.");
        }
        if (!ret.equals(krb5ConfFile))
        {
            throw new IOException(JAVA_SECURITY_KRB5_CONF + " is " + ret + " is not " + krb5ConfFile + ".");
        }
    }

    /**
     * Writes the generated JAAS content to {@code jaasPath}.
     *
     * NOTE(review): the catch block rethrows a new IOException without the
     * original cause, losing the underlying failure detail — consider
     * {@code new IOException("Failed to create jaas.conf File", e)}.
     *
     * @throws IOException
     *             on write failure
     */
    private static void writeJaasFile(String jaasPath, String principal, String keytabPath)
        throws IOException {
        FileWriter writer = new FileWriter(new File(jaasPath));
        try
        {
            writer.write(getJaasConfContext(principal, keytabPath));
            writer.flush();
        }
        catch (IOException e)
        {
            throw new IOException("Failed to create jaas.conf File");
        }
        finally
        {
            writer.close();
        }
    }

    /**
     * Deletes the jaas file at {@code jaasPath} when present; failing to delete
     * an existing file is treated as an error.
     */
    private static void deleteJaasFile(String jaasPath)
        throws IOException {
        File jaasFile = new File(jaasPath);
        if (jaasFile.exists())
        {
            if (!jaasFile.delete())
            {
                throw new IOException("Failed to delete exists jaas file.");
            }
        }
    }

    /**
     * Builds the full JAAS file body: one login section per {@link Module}
     * (KafkaClient and Client).
     */
    private static String getJaasConfContext(String principal, String keytabPath) {
        Module[] allModule = Module.values();
        StringBuilder builder = new StringBuilder();
        for (Module modlue : allModule)
        {
            builder.append(getModuleContext(principal, keytabPath, modlue));
        }
        return builder.toString();
    }

    /**
     * Renders a single JAAS login section, using the IBM or Sun Krb5LoginModule
     * syntax depending on the running JDK vendor.
     */
    private static String getModuleContext(String userPrincipal, String keyTabPath, Module module) {
        StringBuilder builder = new StringBuilder();
        if (IS_IBM_JDK) {
            builder.append(module.getName()).append(" {").append(LINE_SEPARATOR);
            builder.append(IBM_LOGIN_MODULE).append(LINE_SEPARATOR);
            builder.append("credsType=both").append(LINE_SEPARATOR);
            builder.append("principal=\"" + userPrincipal + "\"").append(LINE_SEPARATOR);
            builder.append("useKeytab=\"" + keyTabPath + "\"").append(LINE_SEPARATOR);
            builder.append("debug=true;").append(LINE_SEPARATOR);
            builder.append("};").append(LINE_SEPARATOR);
        } else {
            builder.append(module.getName()).append(" {").append(LINE_SEPARATOR);
            builder.append(SUN_LOGIN_MODULE).append(LINE_SEPARATOR);
            builder.append("useKeyTab=true").append(LINE_SEPARATOR);
            builder.append("keyTab=\"" + keyTabPath + "\"").append(LINE_SEPARATOR);
            builder.append("principal=\"" + userPrincipal + "\"").append(LINE_SEPARATOR);
            builder.append("useTicketCache=false").append(LINE_SEPARATOR);
            builder.append("storeKey=true").append(LINE_SEPARATOR);
            builder.append("debug=true;").append(LINE_SEPARATOR);
            builder.append("};").append(LINE_SEPARATOR);
        }

        return builder.toString();
    }

    /**
     * Performs full security setup: krb5 config, ZooKeeper server principal,
     * then the JAAS file. Expects krb5.conf and the keytab under /home/rsoft/config/.
     *
     * @param principal  machine-account principal to log in as
     * @param keyTabFile keytab file name (relative to the config directory)
     * @throws IOException on any setup failure
     */
    public static void securityPrepare(String principal, String keyTabFile) throws IOException {
        // String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
        String filePath = "/home/rsoft/config/";
        String krbFile = filePath + "krb5.conf";
        String userKeyTableFile = filePath + keyTabFile;

        // Escape backslashes so Windows paths survive inside config values.
        userKeyTableFile = userKeyTableFile.replace("\\", "\\\\");
        krbFile = krbFile.replace("\\", "\\\\");

        LoginUtil.setKrb5Config(krbFile);
        LoginUtil.setZookeeperServerPrincipal("zookeeper/hadoop.hadoop.com");
        LoginUtil.setJaasFile(principal, userKeyTableFile);
    }

    /**
     * Check security mode: reads /home/rsoft/config/kafkaSecurityMode and
     * returns true only when kafka.client.security.mode=yes.
     *
     * NOTE(review): the FileInputStream opened here is never closed.
     *
     * @return boolean
     */
    public static Boolean isSecurityModel() {
        Boolean isSecurity = false;
        // String krbFilePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator + "kafkaSecurityMode";
        String krbFilePath = "/home/rsoft/config/kafkaSecurityMode";
        Properties securityProps = new Properties();

        // file does not exist.
        if (!isFileExists(krbFilePath)) {
            return isSecurity;
        }

        try {
            securityProps.load(new FileInputStream(krbFilePath));

            if ("yes".equalsIgnoreCase(securityProps.getProperty("kafka.client.security.mode")))
            {
                isSecurity = true;
            }
        } catch (Exception e) {
            LOG.info("The Exception occured : {}.", e);
        }

        return isSecurity;
    }

    /*
     * Check whether the file exists.
     */
    private static boolean isFileExists(String fileName) {
        File file = new File(fileName);

        return file.exists();
    }
}
|
||||
|
|
@ -0,0 +1,139 @@
|
|||
package org.dromara.data2kafka.consumer;
|
||||
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Component;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
|
||||
//@Component
/**
 * Bootstraps a single-threaded Kafka consumer for GPS topics; registration as
 * a Spring component is currently disabled (see commented @Component above).
 */
public class Consumer extends Thread {
    private static final Logger LOG = LoggerFactory.getLogger(Consumer.class);

    // Max wait per poll request in seconds. NOTE(review): never read — presumably
    // superseded by the hard-coded poll timeout in KafkaConsumerRunnable; confirm and remove.
    private final int waitTime = 1;

    // Broker connection address.
    private final static String BOOTSTRAP_SERVER = "bootstrap.servers";

    // Consumer group id.
    private final static String GROUP_ID = "group.id";

    // Deserializer class for message values.
    private final static String VALUE_DESERIALIZER = "value.deserializer";

    // Deserializer class for message keys.
    private final static String KEY_DESERIALIZER = "key.deserializer";

    // Protocol type: SASL_PLAINTEXT or PLAINTEXT.
    private final static String SECURITY_PROTOCOL = "security.protocol";

    // Kerberos service name.
    private final static String SASL_KERBEROS_SERVICE_NAME = "sasl.kerberos.service.name";

    // Kerberos domain name.
    private final static String KERBEROS_DOMAIN_NAME = "kerberos.domain.name";

    // Whether offsets are committed automatically.
    private final static String ENABLE_AUTO_COMMIT = "enable.auto.commit";

    // Interval between automatic offset commits (ms).
    private final static String AUTO_COMMIT_INTERVAL_MS = "auto.commit.interval.ms";

    // Session timeout (ms).
    private final static String SESSION_TIMEOUT_MS = "session.timeout.ms";

    /**
     * Keytab file name of the self-applied machine account.
     */
    private static final String USER_KEYTAB_FILE = "user.keytab";

    /**
     * Principal name of the self-applied machine account.
     */
    private static final String USER_PRINCIPAL = "aqdsj_ruansi@HADOOP.COM";

    @Autowired
    ThreadPoolExecutor dtpExecutor2;

    /**
     * Consumer constructor: runs security prep, builds consumer properties, and
     * starts the polling runnable on a fresh single-thread executor.
     *
     * NOTE(review): dtpExecutor2 is an @Autowired FIELD, so it is still null while
     * this constructor runs (field injection happens after construction) — the
     * runnable likely receives a null executor; verify how this class is instantiated.
     * NOTE(review): the ExecutorService created here is never shut down.
     *
     * @param
     */
    public Consumer() {
        initSecurity();
        Properties props = initProperties();
        ExecutorService executorService = Executors.newSingleThreadExecutor();
        KafkaConsumerRunnable runnable = new KafkaConsumerRunnable(props,dtpExecutor2,"3408");
        executorService.execute(runnable);

    }

    /**
     * Builds the Kafka consumer Properties from KafkaProperties files, with
     * hard-coded defaults, and stashes the subscription topic list under the
     * non-Kafka key "topics" for KafkaConsumerRunnable to read.
     *
     * @return fully populated consumer properties
     */
    public static Properties initProperties() {
        Properties props = new Properties();
        KafkaProperties kafkaProc = KafkaProperties.getInstance();

        // Broker connection address.
        props.put(BOOTSTRAP_SERVER, kafkaProc.getValues(BOOTSTRAP_SERVER, "localhost:21007"));
        // Consumer group id.
        props.put(GROUP_ID, kafkaProc.getValues(GROUP_ID, "DemoConsumer"))
;
        // Whether offsets are committed automatically.
        props.put(ENABLE_AUTO_COMMIT, kafkaProc.getValues(ENABLE_AUTO_COMMIT, "true"));
        // Interval between automatic offset commits.
        props.put(AUTO_COMMIT_INTERVAL_MS, kafkaProc.getValues(AUTO_COMMIT_INTERVAL_MS,"1000"));
        // Session timeout.
        props.put(SESSION_TIMEOUT_MS, kafkaProc.getValues(SESSION_TIMEOUT_MS, "30000"));
        // Deserializer class for message keys.
        props.put(KEY_DESERIALIZER,
            kafkaProc.getValues(KEY_DESERIALIZER, "org.apache.kafka.common.serialization.StringDeserializer"));
        // Deserializer class for message values.
        props.put(VALUE_DESERIALIZER,
            kafkaProc.getValues(VALUE_DESERIALIZER, "org.apache.kafka.common.serialization.StringDeserializer"));
        // Security protocol type.
        props.put(SECURITY_PROTOCOL, kafkaProc.getValues(SECURITY_PROTOCOL, "SASL_PLAINTEXT"));
        // Kerberos service name.
        props.put(SASL_KERBEROS_SERVICE_NAME, "kafka");
        // Kerberos domain name.
        props.put(KERBEROS_DOMAIN_NAME, kafkaProc.getValues(KERBEROS_DOMAIN_NAME, "hadoop.hadoop.com"));

        String topics = "topic.send.2,topic.send.3,topic.send.4,topic.send.5,topic.send.8";
        String[] split = topics.split(",");
        List list = CollectionUtils.arrayToList(split);
        props.put("topics",list);

        return props;
    }

    /**
     * Initializes Kerberos security when kafkaSecurityMode says so; any IO
     * failure during preparation is logged but not rethrown.
     */
    public void initSecurity() {
        if (org.dromara.data2kafka.config.LoginUtil.isSecurityModel())
        {
            try {
                LOG.info("Securitymode start.");

                // !! In secure mode, replace with your own self-applied machine account.
                LoginUtil.securityPrepare(USER_PRINCIPAL, USER_KEYTAB_FILE);
            } catch (IOException e) {
                LOG.error("Security prepare failure.");
                LOG.error("The IOException occured.", e);
            }
            LOG.info("Security prepare success.");
        }
    }

}
|
||||
|
|
@ -0,0 +1,219 @@
|
|||
package org.dromara.data2kafka.consumer;
|
||||
|
||||
import cn.hutool.core.bean.BeanUtil;
|
||||
import cn.hutool.core.bean.copier.CopyOptions;
|
||||
import cn.hutool.core.convert.ConvertException;
|
||||
import cn.hutool.core.date.DateTime;
|
||||
import cn.hutool.core.date.DateUtil;
|
||||
import cn.hutool.json.JSONObject;
|
||||
import cn.hutool.json.JSONUtil;
|
||||
import com.alibaba.fastjson.JSON;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.dromara.data2kafka.domain.EsGpsInfo;
|
||||
import org.dromara.data2kafka.domain.EsGpsInfoVO;
|
||||
import org.dromara.data2kafka.producer.Producer;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
 * <p>description: per-record worker — validates an incoming GPS message and
 * forwards it to the "jysb_dwxx" topic via the shared Producer. City-specific
 * payload formats are dispatched on the length of cityCode (see run()).</p>
 *
 * NOTE(review): the three request methods share near-identical validation
 * (deviceCode/latitude/longitude) — candidates for extraction into one helper.
 *
 * @author chenle
 * @date 2021-09-06 16:44
 */
public class ConsumerWorker implements Runnable {
    private ConsumerRecord<String, Object> record;


    private final Producer producer;

    private Logger logger = LoggerFactory.getLogger(ConsumerWorker.class);




    private String cityCode ;

    ConsumerWorker(ConsumerRecord<String, Object> record, String cityCode) {
        this.producer = Producer.getInstance();
        this.record = record;
        this.cityCode = cityCode;
    }

    @Override
    public void run() {
        // Dispatch trick: cities we developed ourselves pass a 4-char cityCode;
        // other cities pass more than 4 chars, which we truncate back to 4.
        if(cityCode.length() > 4){
            cityCode = cityCode.substring(0,4);
            normalRequest();
        }else {
            // Lu'an / Anqing etc. — cities whose payloads our own company produced.
            luanrequest();
            // luanrequestBatch();
        }
    }

    /*
     * Deprecated method (batch variant, no longer called).
     * NOTE(review): `return` inside the loop aborts the whole batch on the first
     * bad record; the collected `list` is never persisted (save call commented out).
     * */
    private void luanrequestBatch() {
        Object value = record.value();
        String topic = record.topic();
        List<EsGpsInfo> list = new ArrayList<>();
        logger.info("offset={},topic={},value={}", record.offset(), topic,value);
        List<JSONObject> jsonObjects = JSON.parseArray((String) value, JSONObject.class);
        for (JSONObject jsonObject : jsonObjects) {
            EsGpsInfo esGpsInfo;
            /*try {
                jsonObject = JSONUtil.parseObj(((String) value));
            }catch (ConvertException e){
                logger.info("jsonObject=null:error={}",e.getMessage());
                return;
            }*/
            try {
                esGpsInfo = JSONUtil.toBean(jsonObject, EsGpsInfo.class);
            }catch (ConvertException e){
                logger.info("EsGpsInfo=null:error={}",e.getMessage());
                return;
            }

            if(Objects.isNull(esGpsInfo)){
                logger.info("esGpsInfo=null no error");
                return;
            }
            String deviceCode = esGpsInfo.getDeviceCode();
            if(StringUtils.isEmpty(deviceCode) || deviceCode.length() > 100){
                logger.info("deviceCode:{} is null or is too long ",deviceCode);
                return;
            }
            String latitude = esGpsInfo.getLat();
            if(StringUtils.isEmpty(latitude) || "0.0".equals(latitude)){
                logger.info("latitude:{} is null or is zero ",latitude);
                return;
            }
            String longitude = esGpsInfo.getLng();
            if(StringUtils.isEmpty(longitude) || "0.0".equals(longitude)){
                logger.info("longitude:{} is null or is zero ",longitude);
                return;
            }
            esGpsInfo.setInfoSource(cityCode);

            esGpsInfo.setGpsTime(new Date(Long.valueOf(jsonObject.getStr("gpsTime"))));
            list.add(esGpsInfo);
        }
        // dataToEsService.saveGpsInfoBatch(list);
    }

    /**
     * Single-record path for self-developed cities: parse JSON -> EsGpsInfo,
     * validate deviceCode/lat/lng, stamp infoSource with the city code, and
     * forward to the "jysb_dwxx" topic. Invalid records are logged and dropped.
     * The "gpsTime" field is assumed to be epoch millis — TODO confirm with producer side.
     */
    private void luanrequest() {
        Object value = record.value();
        String topic = record.topic();

        logger.info("offset={},topic={},value={}", record.offset(), topic,value);
        EsGpsInfo esGpsInfo;
        JSONObject jsonObject;
        try {
            jsonObject = JSONUtil.parseObj(((String) value));
        }catch (ConvertException e){
            logger.info("jsonObject=null:error={}",e.getMessage());
            return;
        }
        try {
            esGpsInfo = JSONUtil.toBean(jsonObject, EsGpsInfo.class);
        }catch (ConvertException e){
            logger.info("EsGpsInfo=null:error={}",e.getMessage());
            return;
        }

        if(Objects.isNull(esGpsInfo)){
            logger.info("esGpsInfo=null no error");
            return;
        }
        String deviceCode = esGpsInfo.getDeviceCode();
        if(StringUtils.isEmpty(deviceCode) || deviceCode.length() > 100){
            logger.info("deviceCode:{} is null or is too long ",deviceCode);
            return;
        }
        String latitude = esGpsInfo.getLat();
        if(StringUtils.isEmpty(latitude) || "0.0".equals(latitude)){
            logger.info("latitude:{} is null or is zero ",latitude);
            return;
        }
        String longitude = esGpsInfo.getLng();
        if(StringUtils.isEmpty(longitude) || "0.0".equals(longitude)){
            logger.info("longitude:{} is null or is zero ",longitude);
            return;
        }
        esGpsInfo.setInfoSource(cityCode);
        try {
            // Bad gpsTime is tolerated here (logged, record still forwarded),
            // unlike normalRequest() which drops the record.
            esGpsInfo.setGpsTime(new Date(Long.valueOf(jsonObject.getStr("gpsTime"))));
        }catch (Exception e){
            logger.error("error_msg={}",e.getMessage());
        }
        producer.sendMessage("jysb_dwxx",JSONUtil.toJsonStr(esGpsInfo));
    }




    /**
     * Generic request path (used by most cities): parse the VO format, validate
     * gpsTime ("yyyy-MM-dd HH:mm:ss") and deviceCode/lat/lng, copy into the
     * provincial EsGpsInfo shape, and forward to "jysb_dwxx".
     */
    private void normalRequest() {
        Object value = record.value();
        String topic = record.topic();

        logger.info("offset={},topic={},value={}", record.offset(), topic,value);

        EsGpsInfo gpsInfo = new EsGpsInfo();
        EsGpsInfoVO esGpsInfoVO;
        try {
            esGpsInfoVO = JSONUtil.toBean(((String) value), EsGpsInfoVO.class);
        }catch (ConvertException e){
            logger.info("esGpsInfoVO=null:error={}",e.getMessage());
            return;
        }
        if(Objects.isNull(esGpsInfoVO)){
            logger.info("esGpsInfoVO=null no error");
            return;
        }


        try {
            // Parse only to validate the format; the parsed value itself is unused.
            DateTime parse = DateUtil.parse(esGpsInfoVO.getGpsTime(), "yyyy-MM-dd HH:mm:ss");
        }catch (Exception e){
            logger.info("gpsTime:{} format error", esGpsInfoVO.getGpsTime());
            return;
        }

        String deviceCode = esGpsInfoVO.getDeviceCode();
        if(StringUtils.isEmpty(deviceCode) || deviceCode.length() > 100){
            logger.info("deviceCode:{} is null or is too long ",deviceCode);
            return;
        }
        String latitude = esGpsInfoVO.getLatitude();
        if(StringUtils.isEmpty(latitude) || "0.0".equals(latitude)){
            logger.info("latitude:{} is null or is zero ",latitude);
            return;
        }
        String longitude = esGpsInfoVO.getLongitude();
        if(StringUtils.isEmpty(longitude) || "0.0".equals(longitude)){
            logger.info("longitude:{} is null or is zero ",longitude);
            return;
        }
        // Copy matching fields, then map the differently-named ones by hand.
        BeanUtil.copyProperties(esGpsInfoVO,gpsInfo,new CopyOptions());
        gpsInfo.setLat(latitude);
        gpsInfo.setLng(esGpsInfoVO.getLongitude());
        gpsInfo.setOrientation(esGpsInfoVO.getDirection());
        gpsInfo.setInfoSource(cityCode);
        producer.sendMessage("jysb_dwxx",JSONUtil.toJsonStr(gpsInfo));
    }


}
|
||||
|
|
@ -0,0 +1,70 @@
|
|||
package org.dromara.data2kafka.consumer;
|
||||
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.common.PartitionInfo;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
|
||||
/**
 * <p>description: long-running poll loop — subscribes a KafkaConsumer to the
 * topic list stashed under the non-Kafka "topics" key in {@code props} and
 * hands each record to the thread pool as a ConsumerWorker.</p>
 *
 * NOTE(review): the loop never exits and the consumer is never closed; raw
 * Map/List types are used because callers pass java.util.Properties directly.
 *
 * @author chenle
 * @date 2021-09-06 16:39
 */
public class KafkaConsumerRunnable implements Runnable {

    // Consumer configuration; also carries the subscription list under key "topics".
    private Map props;
    // Pool that executes one ConsumerWorker per polled record.
    private ThreadPoolExecutor taskExecutor;

    // City code forwarded to each ConsumerWorker.
    private String cityCode;
    private Logger logger = LoggerFactory.getLogger(KafkaConsumerRunnable.class);

    public KafkaConsumerRunnable(Map props, ThreadPoolExecutor taskExecutor,
                                 String cityCode) {
        this.props = props;
        this.taskExecutor = taskExecutor;
        this.cityCode = cityCode;
    }


    @Override
    public void run() {
        KafkaConsumer<String, Object> consumer = new KafkaConsumer<>(props);

        // The subscription list was tucked into props under "topics" by the caller.
        List topics = (List) props.get("topics");
        consumer.subscribe(topics);
        // consumer.poll(0); // make the subscription take effect
        //
        // List<TopicPartition> topicPartitions = new ArrayList<>();
        // Map<String, List<PartitionInfo>> stringListMap = consumer.listTopics();
        // for (Object topic : topics) {
        //     String topic1 = (String) topic;
        //     List<PartitionInfo> partitionInfos = stringListMap.get(topic1);
        //     for (PartitionInfo partitionInfo : partitionInfos) {
        //         TopicPartition partition = new TopicPartition(topic1, partitionInfo.partition());
        //         topicPartitions.add(partition);
        //     }
        // }
        // consumer.seekToEnd(topicPartitions); // passing Collections.emptyList() would move ALL subscribed partitions' offsets to the end

        logger.info("SCRAM认证Kafka订阅Topic成功:{}", topics);
        while (true) {
            // Extend poll timeout to 10s (enough for cross-subnet links).
            ConsumerRecords<String, Object> records = consumer.poll(Duration.ofMillis(10000));
            logger.info("Poll到消息数:{}", records.count());
            for (ConsumerRecord<String, Object> record : records) {
                logger.info("offset={}, topic={}, value={}", record.offset(), record.topic(), record.value());
                taskExecutor.submit(new ConsumerWorker(record, cityCode));
            }
        }
    }
}
||||
|
|
@ -0,0 +1,138 @@
|
|||
package org.dromara.data2kafka.consumer;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.IOException;
|
||||
import java.util.Properties;
|
||||
|
||||
public final class KafkaProperties
|
||||
{
|
||||
private static final Logger LOG = LoggerFactory.getLogger(KafkaProperties.class);
|
||||
|
||||
// Topic名称,安全模式下,需要以管理员用户添加当前用户的访问权限
|
||||
public final static String TOPIC = "jysb_dwxx";
|
||||
|
||||
private static Properties serverProps = new Properties();
|
||||
|
||||
private static Properties producerProps = new Properties();
|
||||
|
||||
private static Properties consumerProps = new Properties();
|
||||
|
||||
private static Properties clientProps = new Properties();
|
||||
|
||||
private static KafkaProperties instance = null;
|
||||
|
||||
private KafkaProperties()
|
||||
{
|
||||
// String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
|
||||
String filePath = "/shengting/gpsstore/";
|
||||
LOG.info("路径=={}",filePath);
|
||||
try
|
||||
{
|
||||
File proFile = new File(filePath + "producer.properties");
|
||||
|
||||
if (proFile.exists())
|
||||
{
|
||||
producerProps.load(new FileInputStream(filePath + "producer.properties"));
|
||||
}
|
||||
|
||||
File conFile = new File(filePath + "producer.properties");
|
||||
|
||||
if (conFile.exists())
|
||||
{
|
||||
consumerProps.load(new FileInputStream(filePath + "consumer.properties"));
|
||||
}
|
||||
|
||||
File serFile = new File(filePath + "server.properties");
|
||||
|
||||
if (serFile.exists())
|
||||
{
|
||||
serverProps.load(new FileInputStream(filePath + "server.properties"));
|
||||
}
|
||||
|
||||
File cliFile = new File(filePath + "client.properties");
|
||||
|
||||
if (cliFile.exists())
|
||||
{
|
||||
clientProps.load(new FileInputStream(filePath + "client.properties"));
|
||||
}
|
||||
}
|
||||
catch (IOException e)
|
||||
{
|
||||
LOG.info("The Exception occured.", e);
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized static KafkaProperties getInstance()
|
||||
{
|
||||
if (null == instance)
|
||||
{
|
||||
instance = new KafkaProperties();
|
||||
}
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取参数值
|
||||
* @param key properites的key值
|
||||
* @param defValue 默认值
|
||||
* @return
|
||||
*/
|
||||
public String getValues(String key, String defValue)
|
||||
{
|
||||
String rtValue = null;
|
||||
|
||||
if (null == key)
|
||||
{
|
||||
LOG.error("key is null");
|
||||
}
|
||||
else
|
||||
{
|
||||
rtValue = getPropertiesValue(key);
|
||||
}
|
||||
|
||||
if (null == rtValue)
|
||||
{
|
||||
LOG.warn("KafkaProperties.getValues return null, key is " + key);
|
||||
rtValue = defValue;
|
||||
}
|
||||
|
||||
LOG.info("KafkaProperties.getValues: key is " + key + "; Value is " + rtValue);
|
||||
|
||||
return rtValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* 根据key值获取server.properties的值
|
||||
* @param key
|
||||
* @return
|
||||
*/
|
||||
private String getPropertiesValue(String key)
|
||||
{
|
||||
String rtValue = serverProps.getProperty(key);
|
||||
|
||||
// server.properties中没有,则再向producer.properties中获取
|
||||
if (null == rtValue)
|
||||
{
|
||||
rtValue = producerProps.getProperty(key);
|
||||
}
|
||||
|
||||
// producer中没有,则再向consumer.properties中获取
|
||||
if (null == rtValue)
|
||||
{
|
||||
rtValue = consumerProps.getProperty(key);
|
||||
}
|
||||
|
||||
// consumer没有,则再向client.properties中获取
|
||||
if (null == rtValue)
|
||||
{
|
||||
rtValue = clientProps.getProperty(key);
|
||||
}
|
||||
|
||||
return rtValue;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,259 @@
|
|||
package org.dromara.data2kafka.consumer;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.util.Properties;
|
||||
|
||||
public class LoginUtil {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(LoginUtil.class);
|
||||
|
||||
/**
|
||||
* no JavaDoc
|
||||
*/
|
||||
public enum Module {
|
||||
KAFKA("KafkaClient"), ZOOKEEPER("Client");
|
||||
|
||||
private String name;
|
||||
|
||||
private Module(String name)
|
||||
{
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public String getName()
|
||||
{
|
||||
return name;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* line operator string
|
||||
*/
|
||||
private static final String LINE_SEPARATOR = System.getProperty("line.separator");
|
||||
|
||||
/**
|
||||
* jaas file postfix
|
||||
*/
|
||||
private static final String JAAS_POSTFIX = ".jaas.conf";
|
||||
|
||||
/**
|
||||
* is IBM jdk or not
|
||||
*/
|
||||
private static final boolean IS_IBM_JDK = System.getProperty("java.vendor").contains("IBM");
|
||||
|
||||
/**
|
||||
* IBM jdk login module
|
||||
*/
|
||||
private static final String IBM_LOGIN_MODULE = "com.ibm.security.auth.module.Krb5LoginModule required";
|
||||
|
||||
/**
|
||||
* oracle jdk login module
|
||||
*/
|
||||
private static final String SUN_LOGIN_MODULE = "com.sun.security.auth.module.Krb5LoginModule required";
|
||||
|
||||
/**
|
||||
* Zookeeper quorum principal.
|
||||
*/
|
||||
public static final String ZOOKEEPER_AUTH_PRINCIPAL = "zookeeper.server.principal";
|
||||
|
||||
/**
|
||||
* java security krb5 file path
|
||||
*/
|
||||
public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf";
|
||||
|
||||
/**
|
||||
* java security login file path
|
||||
*/
|
||||
public static final String JAVA_SECURITY_LOGIN_CONF = "java.security.auth.login.config";
|
||||
|
||||
/**
|
||||
* 设置jaas.conf文件
|
||||
*
|
||||
* @param principal
|
||||
* @param keytabPath
|
||||
* @throws IOException
|
||||
*/
|
||||
public static void setJaasFile(String principal, String keytabPath)
|
||||
throws IOException {
|
||||
String jaasPath =
|
||||
new File(System.getProperty("java.io.tmpdir")) + File.separator + System.getProperty("user.name")
|
||||
+ JAAS_POSTFIX;
|
||||
|
||||
// windows路径下分隔符替换
|
||||
jaasPath = jaasPath.replace("\\", "\\\\");
|
||||
// 删除jaas文件
|
||||
deleteJaasFile(jaasPath);
|
||||
writeJaasFile(jaasPath, principal, keytabPath);
|
||||
System.setProperty(JAVA_SECURITY_LOGIN_CONF, jaasPath);
|
||||
}
|
||||
|
||||
/**
|
||||
* 设置zookeeper服务端principal
|
||||
*
|
||||
* @param zkServerPrincipal
|
||||
* @throws IOException
|
||||
*/
|
||||
public static void setZookeeperServerPrincipal(String zkServerPrincipal)
|
||||
throws IOException {
|
||||
System.setProperty(ZOOKEEPER_AUTH_PRINCIPAL, zkServerPrincipal);
|
||||
String ret = System.getProperty(ZOOKEEPER_AUTH_PRINCIPAL);
|
||||
if (ret == null)
|
||||
{
|
||||
throw new IOException(ZOOKEEPER_AUTH_PRINCIPAL + " is null.");
|
||||
}
|
||||
if (!ret.equals(zkServerPrincipal))
|
||||
{
|
||||
throw new IOException(ZOOKEEPER_AUTH_PRINCIPAL + " is " + ret + " is not " + zkServerPrincipal + ".");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 设置krb5文件
|
||||
*
|
||||
* @param krb5ConfFile
|
||||
* @throws IOException
|
||||
*/
|
||||
public static void setKrb5Config(String krb5ConfFile)
|
||||
throws IOException {
|
||||
System.setProperty(JAVA_SECURITY_KRB5_CONF, krb5ConfFile);
|
||||
String ret = System.getProperty(JAVA_SECURITY_KRB5_CONF);
|
||||
if (ret == null)
|
||||
{
|
||||
throw new IOException(JAVA_SECURITY_KRB5_CONF + " is null.");
|
||||
}
|
||||
if (!ret.equals(krb5ConfFile))
|
||||
{
|
||||
throw new IOException(JAVA_SECURITY_KRB5_CONF + " is " + ret + " is not " + krb5ConfFile + ".");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 写入jaas文件
|
||||
*
|
||||
* @throws IOException
|
||||
* 写文件异常
|
||||
*/
|
||||
private static void writeJaasFile(String jaasPath, String principal, String keytabPath)
|
||||
throws IOException {
|
||||
FileWriter writer = new FileWriter(new File(jaasPath));
|
||||
try
|
||||
{
|
||||
writer.write(getJaasConfContext(principal, keytabPath));
|
||||
writer.flush();
|
||||
}
|
||||
catch (IOException e)
|
||||
{
|
||||
throw new IOException("Failed to create jaas.conf File");
|
||||
}
|
||||
finally
|
||||
{
|
||||
writer.close();
|
||||
}
|
||||
}
|
||||
|
||||
private static void deleteJaasFile(String jaasPath)
|
||||
throws IOException {
|
||||
File jaasFile = new File(jaasPath);
|
||||
if (jaasFile.exists())
|
||||
{
|
||||
if (!jaasFile.delete())
|
||||
{
|
||||
throw new IOException("Failed to delete exists jaas file.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static String getJaasConfContext(String principal, String keytabPath) {
|
||||
Module[] allModule = Module.values();
|
||||
StringBuilder builder = new StringBuilder();
|
||||
for (Module modlue : allModule)
|
||||
{
|
||||
builder.append(getModuleContext(principal, keytabPath, modlue));
|
||||
}
|
||||
return builder.toString();
|
||||
}
|
||||
|
||||
private static String getModuleContext(String userPrincipal, String keyTabPath, Module module) {
|
||||
StringBuilder builder = new StringBuilder();
|
||||
if (IS_IBM_JDK) {
|
||||
builder.append(module.getName()).append(" {").append(LINE_SEPARATOR);
|
||||
builder.append(IBM_LOGIN_MODULE).append(LINE_SEPARATOR);
|
||||
builder.append("credsType=both").append(LINE_SEPARATOR);
|
||||
builder.append("principal=\"" + userPrincipal + "\"").append(LINE_SEPARATOR);
|
||||
builder.append("useKeytab=\"" + keyTabPath + "\"").append(LINE_SEPARATOR);
|
||||
builder.append("debug=true;").append(LINE_SEPARATOR);
|
||||
builder.append("};").append(LINE_SEPARATOR);
|
||||
} else {
|
||||
builder.append(module.getName()).append(" {").append(LINE_SEPARATOR);
|
||||
builder.append(SUN_LOGIN_MODULE).append(LINE_SEPARATOR);
|
||||
builder.append("useKeyTab=true").append(LINE_SEPARATOR);
|
||||
builder.append("keyTab=\"" + keyTabPath + "\"").append(LINE_SEPARATOR);
|
||||
builder.append("principal=\"" + userPrincipal + "\"").append(LINE_SEPARATOR);
|
||||
builder.append("useTicketCache=false").append(LINE_SEPARATOR);
|
||||
builder.append("storeKey=true").append(LINE_SEPARATOR);
|
||||
builder.append("debug=true;").append(LINE_SEPARATOR);
|
||||
builder.append("};").append(LINE_SEPARATOR);
|
||||
}
|
||||
|
||||
return builder.toString();
|
||||
}
|
||||
|
||||
public static void securityPrepare(String principal, String keyTabFile) throws IOException {
|
||||
// String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
|
||||
String filePath = "/shengting/gpsstore/";
|
||||
String krbFile = filePath + "krb5.conf";
|
||||
String userKeyTableFile = filePath + keyTabFile;
|
||||
|
||||
// windows路径下分隔符替换
|
||||
userKeyTableFile = userKeyTableFile.replace("\\", "\\\\");
|
||||
krbFile = krbFile.replace("\\", "\\\\");
|
||||
|
||||
LoginUtil.setKrb5Config(krbFile);
|
||||
LoginUtil.setZookeeperServerPrincipal("zookeeper/hadoop.hadoop.com");
|
||||
LoginUtil.setJaasFile(principal, userKeyTableFile);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check security mode
|
||||
*
|
||||
* @return boolean
|
||||
*/
|
||||
public static Boolean isSecurityModel() {
|
||||
Boolean isSecurity = false;
|
||||
// String krbFilePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator + "kafkaSecurityMode";
|
||||
String krbFilePath = "/shengting/gpsstore/kafkaSecurityMode";
|
||||
Properties securityProps = new Properties();
|
||||
|
||||
// file does not exist.
|
||||
if (!isFileExists(krbFilePath)) {
|
||||
return isSecurity;
|
||||
}
|
||||
|
||||
try {
|
||||
securityProps.load(new FileInputStream(krbFilePath));
|
||||
|
||||
if ("yes".equalsIgnoreCase(securityProps.getProperty("kafka.client.security.mode")))
|
||||
{
|
||||
isSecurity = true;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOG.info("The Exception occured : {}.", e);
|
||||
}
|
||||
|
||||
return isSecurity;
|
||||
}
|
||||
|
||||
/*
|
||||
* 判断文件是否存在
|
||||
*/
|
||||
private static boolean isFileExists(String fileName) {
|
||||
File file = new File(fileName);
|
||||
|
||||
return file.exists();
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,149 @@
|
|||
package org.dromara.data2kafka.consumer;
|
||||
|
||||
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.common.serialization.StringDeserializer;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.CommandLineRunner;
|
||||
import org.springframework.stereotype.Component;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.ConnectException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.Socket;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2021-09-06 11:15
|
||||
*/
|
||||
@Component
|
||||
public class RealConsumer implements CommandLineRunner {
|
||||
|
||||
private String kafkaServers;
|
||||
|
||||
private String groupId;
|
||||
|
||||
private String topics;
|
||||
|
||||
private String cityCode = "3400";
|
||||
|
||||
/**
|
||||
* 用户自己申请的机机账号keytab文件名称
|
||||
*/
|
||||
private static final String USER_KEYTAB_FILE = "user.keytab";
|
||||
|
||||
/**
|
||||
* 用户自己申请的机机账号名称
|
||||
*/
|
||||
private static final String USER_PRINCIPAL = "aqdsj_ruansi@HADOOP.COM";
|
||||
|
||||
@Autowired
|
||||
ThreadPoolExecutor dtpExecutor2;
|
||||
|
||||
|
||||
private Logger logger = LoggerFactory.getLogger(RealConsumer.class);
|
||||
|
||||
@Override
|
||||
public void run(String... args) throws Exception {
|
||||
kafkaServers = "127.0.0.1:9092";
|
||||
topics = "topic.send.2,topic.send.3,topic.send.4,topic.send.5,topic.send.8";
|
||||
groupId = "group_ruansi_xuancheng";
|
||||
cityCode = "3418";
|
||||
if(args.length > 0){
|
||||
kafkaServers = args[0];
|
||||
topics = args[1];
|
||||
groupId = args[2];
|
||||
cityCode = args[3];
|
||||
|
||||
}
|
||||
ExecutorService executorService = Executors.newSingleThreadExecutor();
|
||||
Map kafkaProp = getKafkaProp();
|
||||
|
||||
checkNetworkConnection("53.1.213.25",21007);
|
||||
if (false)
|
||||
{
|
||||
try
|
||||
{
|
||||
logger.info("consumer Securitymode start.");
|
||||
|
||||
//!!注意,安全认证时,需要用户手动修改为自己申请的机机账号
|
||||
//认证方式 SASL_PLAINTEXT 或者 PLAINTEXT
|
||||
kafkaProp.put("security.protocol","SASL_PLAINTEXT");
|
||||
//服务名
|
||||
kafkaProp.put("sasl.kerberos.service.name","kafka");
|
||||
//域名
|
||||
kafkaProp.put("kerberos.domain.name","hadoop.hadoop.com");
|
||||
LoginUtil.securityPrepare(USER_PRINCIPAL,USER_KEYTAB_FILE);
|
||||
// LoginUtil.setJaasFile("","");
|
||||
}
|
||||
catch (IOException e)
|
||||
{
|
||||
logger.error("Security prepare failure.");
|
||||
logger.error("The IOException occured.", e);
|
||||
return;
|
||||
}
|
||||
logger.info("Security prepare success.");
|
||||
}
|
||||
kafkaProp.put("socket.connection.setup.timeout.ms", "60000");
|
||||
|
||||
kafkaProp.put("security.protocol", "SASL_PLAINTEXT");
|
||||
kafkaProp.put("sasl.jaas.config", "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"rsoft\" password=\"rsoft-2026\";");
|
||||
kafkaProp.put("sasl.mechanism", "SCRAM-SHA-256");
|
||||
kafkaProp.put("metadata.max.age.ms", Long.MAX_VALUE); // 彻底禁用元数据更新
|
||||
KafkaConsumerRunnable runnable = new KafkaConsumerRunnable(kafkaProp,dtpExecutor2,cityCode);
|
||||
executorService.execute(runnable);
|
||||
}
|
||||
|
||||
|
||||
private void checkNetworkConnection(String host, int port) {
|
||||
try (Socket socket = new Socket()) {
|
||||
socket.connect(new InetSocketAddress(host, port), 3000);
|
||||
logger.info("✅ 网络连接正常: {}:{}", host, port);
|
||||
} catch (IOException e) {
|
||||
logger.error("🚨 无法连接到 {}:{} - {}", host, port, e.getMessage());
|
||||
// 详细错误分析
|
||||
if (e instanceof ConnectException) {
|
||||
logger.error("请检查: 1. Kafka服务状态 2. 防火墙设置 3. 端口是否正确");
|
||||
} else if (e instanceof UnknownHostException) {
|
||||
logger.error("主机名解析失败,请检查DNS或hosts文件");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* 获取kafka配置
|
||||
* @return
|
||||
*/
|
||||
private Map<String, Object> getKafkaProp() {
|
||||
// Properties map = new Properties();
|
||||
Map<String, Object> map = new HashMap<>();
|
||||
map.put("bootstrap.servers",kafkaServers);
|
||||
map.put("group.id",groupId);
|
||||
map.put("enable.auto.commit", true);
|
||||
map.put("auto.commit.interval.ms", 1000);
|
||||
map.put("session.timeout.ms", "30000");
|
||||
map.put("key.deserializer", StringDeserializer.class);
|
||||
map.put("value.deserializer", StringDeserializer.class);
|
||||
map.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG,5);
|
||||
String[] split = topics.split(",");
|
||||
List list = CollectionUtils.arrayToList(split);
|
||||
map.put("topics", list);
|
||||
return map;
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,52 @@
|
|||
package org.dromara.data2kafka.domain;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonFormat;
|
||||
import lombok.Data;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Date;
|
||||
|
||||
/**
 * GPS location record as persisted to Elasticsearch (the ES table model).
 *
 * @author chenle
 * @date 2021-05-14 9:39
 */
@Data
public class EsGpsInfo implements Serializable {

    private static final long serialVersionUID = 7455495841680488351L;
    /**
     * Unique code from the external system. The Hefei deployment does not need
     * the 21-digit id; when uploading to the provincial department, the Kafka
     * sender side must generate the 21-digit id the province requires.
     */
    private String deviceCode;
    /**
     * Device type.
     */
    private String deviceType;
    private String lat;
    private String lng;
    // Heading / direction of travel
    private String orientation;
    // Elevation
    private String height;
    // Positioning accuracy
    private String deltaH;
    private String speed;

    // Organization code and name (zzjgdm/zzjgmc), officer number and name,
    // phone number and vehicle plate of the tracked unit.
    private String zzjgdm;
    private String zzjgmc;
    private String policeNo;
    private String policeName;
    private String phoneNum;
    private String carNum;

    // Online flag -- exact value semantics (e.g. 0/1) not visible here; confirm with producer.
    private Integer online;

    // GPS fix time, serialized as "yyyy-MM-dd HH:mm:ss" in GMT+8.
    @JsonFormat(pattern="yyyy-MM-dd HH:mm:ss",timezone="GMT+8")
    private Date gpsTime;
    // City code of the data source, e.g. 3401, 3402.
    private String infoSource;

}
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
package org.dromara.data2kafka.domain;
|
||||
|
||||
import lombok.Data;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
 * View object carrying a single GPS position for API responses.
 *
 * @author chenle
 * @date 2022-04-16 14:59
 */
@Data
public class EsGpsInfoVO implements Serializable {
    /**
     * Device serial number; unique per device.
     */
    private String deviceCode;
    private String latitude;
    private String longitude;
    // Heading / direction of travel
    private String direction;
    // Elevation
    private String height;
    // NOTE(review): original comment said "accuracy" but the field is speed -- confirm intent.
    private String speed;

    private String gpsTime;

    // Organization code (zzjgdm) and name (zzjgmc).
    private String zzjgdm;

    private String zzjgmc;

    // Officer number and name.
    private String policeNo;

    private String policeName;

    // Vehicle plate number.
    private String carNum;

    // Online flag -- exact value semantics not visible here; confirm with producer.
    private Integer online;
}
|
||||
|
|
@ -0,0 +1,215 @@
|
|||
package org.dromara.data2kafka.producer;
|
||||
|
||||
import com.alibaba.fastjson.JSONObject;
|
||||
import org.apache.kafka.clients.producer.Callback;
|
||||
import org.apache.kafka.clients.producer.KafkaProducer;
|
||||
import org.apache.kafka.clients.producer.ProducerRecord;
|
||||
import org.apache.kafka.clients.producer.RecordMetadata;
|
||||
import org.dromara.data2kafka.config.KafkaProperties;
|
||||
import org.dromara.data2kafka.config.LoginUtil;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Properties;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.Future;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2021-11-03 14:15
|
||||
*/
|
||||
@Component
|
||||
public class Producer {
|
||||
|
||||
private static final Logger logger = LoggerFactory.getLogger(Producer.class);
|
||||
|
||||
private final KafkaProducer<String, String> producer;
|
||||
|
||||
// 私有静态实例(volatile 保证可见性和有序性)
|
||||
private static volatile Producer instance;
|
||||
|
||||
|
||||
|
||||
private final Boolean isAsync = true;
|
||||
|
||||
|
||||
|
||||
// Broker地址列表
|
||||
private final static String BOOTSTRAP_SERVER = "bootstrap.servers";
|
||||
|
||||
// 客户端ID
|
||||
private final static String CLIENT_ID = "client.id";
|
||||
|
||||
// Key序列化类
|
||||
private final static String KEY_SERIALIZER = "key.serializer";
|
||||
|
||||
// Value序列化类
|
||||
private final static String VALUE_SERIALIZER = "value.serializer";
|
||||
|
||||
// 协议类型:当前支持配置为SASL_PLAINTEXT或者PLAINTEXT
|
||||
private final static String SECURITY_PROTOCOL = "security.protocol";
|
||||
|
||||
// 服务名
|
||||
private final static String SASL_KERBEROS_SERVICE_NAME = "sasl.kerberos.service.name";
|
||||
|
||||
// 域名
|
||||
private final static String KERBEROS_DOMAIN_NAME = "kerberos.domain.name";
|
||||
|
||||
// 分区类名
|
||||
private final static String PARTITIONER_NAME = "partitioner.class";
|
||||
|
||||
// 默认发送100条消息
|
||||
private final static int MESSAGE_NUM = 100;
|
||||
|
||||
/**
|
||||
* 用户自己申请的机机账号keytab文件名称
|
||||
*/
|
||||
private static final String USER_KEYTAB_FILE = "user.keytab";
|
||||
|
||||
/**
|
||||
* 用户自己申请的机机账号名称
|
||||
*/
|
||||
private static final String USER_PRINCIPAL = "yhy_ahrs_rcw";
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Producer constructor
|
||||
*
|
||||
*/
|
||||
public Producer() {
|
||||
initSecurity();
|
||||
Properties props = initProperties();
|
||||
this.producer = new KafkaProducer<>(props);
|
||||
}
|
||||
|
||||
// 获取单例实例的公共方法(双重校验锁)
|
||||
public static Producer getInstance() {
|
||||
if (instance == null) {
|
||||
synchronized (Producer.class) {
|
||||
if (instance == null) {
|
||||
instance = new Producer();
|
||||
}
|
||||
}
|
||||
}
|
||||
return instance;
|
||||
}
|
||||
|
||||
// 添加 ShutdownHook 确保资源释放(推荐)
|
||||
static {
|
||||
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
|
||||
if (instance != null && instance.producer != null) {
|
||||
instance.producer.close();
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* 初始化安全认证
|
||||
*/
|
||||
public void initSecurity() {
|
||||
if (LoginUtil.isSecurityModel())
|
||||
{
|
||||
try {
|
||||
logger.info("Securitymode start.");
|
||||
|
||||
// !!注意,安全认证时,需要用户手动修改为自己申请的机机账号
|
||||
LoginUtil.securityPrepare(USER_PRINCIPAL, USER_KEYTAB_FILE);
|
||||
} catch (IOException e) {
|
||||
logger.error("Security prepare failure.");
|
||||
logger.error("The IOException occured.", e);
|
||||
}
|
||||
logger.info("Security prepare success.");
|
||||
}
|
||||
}
|
||||
|
||||
public static Properties initProperties() {
|
||||
Properties props = new Properties();
|
||||
KafkaProperties kafkaProc = KafkaProperties.getInstance();
|
||||
|
||||
// Broker地址列表
|
||||
props.put(BOOTSTRAP_SERVER, kafkaProc.getValues(BOOTSTRAP_SERVER, "localhost:21007"));
|
||||
// 客户端ID
|
||||
props.put(CLIENT_ID, kafkaProc.getValues(CLIENT_ID, "DemoProducer"));
|
||||
// Key序列化类
|
||||
props.put(KEY_SERIALIZER,
|
||||
kafkaProc.getValues(KEY_SERIALIZER, "org.apache.kafka.common.serialization.StringSerializer"));
|
||||
// Value序列化类
|
||||
props.put(VALUE_SERIALIZER,
|
||||
kafkaProc.getValues(VALUE_SERIALIZER, "org.apache.kafka.common.serialization.StringSerializer"));
|
||||
// 协议类型:当前支持配置为SASL_PLAINTEXT或者PLAINTEXT
|
||||
props.put(SECURITY_PROTOCOL, kafkaProc.getValues(SECURITY_PROTOCOL, "SASL_PLAINTEXT"));
|
||||
// 服务名
|
||||
props.put(SASL_KERBEROS_SERVICE_NAME, "kafka");
|
||||
// 域名
|
||||
props.put(KERBEROS_DOMAIN_NAME, kafkaProc.getValues(KERBEROS_DOMAIN_NAME, "hadoop.hadoop.com"));
|
||||
// 分区类名
|
||||
// props.put(PARTITIONER_NAME, kafkaProc.getValues(PARTITIONER_NAME, "com.huawei.bigdata.kafka.example.SimplePartitioner"));
|
||||
return props;
|
||||
}
|
||||
|
||||
/**
|
||||
* 发送消息(核心方法)
|
||||
*
|
||||
* @param topic
|
||||
* @param message 消息内容
|
||||
* @return 同步发送时返回 RecordMetadata,异步发送返回 null
|
||||
*/
|
||||
public RecordMetadata sendMessage(String topic, String message) {
|
||||
try {
|
||||
logger.info("调用发送:topic={}, Object={}",topic,message );
|
||||
long startTime = System.currentTimeMillis();
|
||||
ProducerRecord<String, String> record = new ProducerRecord<>(topic, message);
|
||||
if (isAsync) {
|
||||
// 异步发送
|
||||
producer.send(record, new DemoCallBack(startTime,topic, message));
|
||||
return null;
|
||||
} else {
|
||||
Future<RecordMetadata> future = producer.send(record);
|
||||
logger.info("同步发送成功: Object={}", future.get().topic());
|
||||
return future.get();
|
||||
|
||||
}
|
||||
}catch (Exception e){
|
||||
e.printStackTrace();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* 内部回调类
|
||||
*/
|
||||
private static class DemoCallBack implements Callback {
|
||||
private final Logger logger = LoggerFactory.getLogger(DemoCallBack.class);
|
||||
private final long startTime;
|
||||
|
||||
private final String topic;
|
||||
private final String message;
|
||||
|
||||
public DemoCallBack(long startTime, String topic, String message) {
|
||||
this.startTime = startTime;
|
||||
this.topic = topic;
|
||||
this.message = message;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onCompletion(RecordMetadata metadata, Exception exception) {
|
||||
long elapsedTime = System.currentTimeMillis() - startTime;
|
||||
if (metadata != null) {
|
||||
logger.info("topic=({}, {}) sent to partition({}), offset({}) in {} ms",
|
||||
topic, message, metadata.partition(), metadata.offset(), elapsedTime);
|
||||
} else if (exception != null) {
|
||||
logger.error("Message sending failed", exception);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
# Tomcat
|
||||
server:
|
||||
port: 9212
|
||||
|
||||
# Spring
|
||||
spring:
|
||||
application:
|
||||
# 应用名称
|
||||
name: stwzhj-data2StKafka
|
||||
profiles:
|
||||
# 环境配置
|
||||
active: @profiles.active@
|
||||
autoconfigure:
|
||||
exclude: org.springframework.boot.autoconfigure.elasticsearch.ElasticsearchRestClientAutoConfiguration
|
||||
|
||||
# 日志配置
|
||||
logging:
|
||||
level:
|
||||
org.springframework: warn
|
||||
org.apache.dubbo: warn
|
||||
com.alibaba.nacos: warn
|
||||
org.mybatis.spring.mapper: error
|
||||
org.apache.dubbo.config: error
|
||||
org.apache.kafka: DEBUG
|
||||
org.springframework.kafka: DEBUG
|
||||
# 临时处理 spring 调整日志级别导致启动警告问题 不影响使用等待 alibaba 适配
|
||||
org.springframework.context.support.PostProcessorRegistrationDelegate: error
|
||||
config: classpath:logback-plus.xml
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
security.protocol = SASL_PLAINTEXT
|
||||
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
kafka.client.zookeeper.principal = zookeeper/hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||
zookeeper.ssl.enable = false
|
||||
sasl.kerberos.service.name = kafka
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
security.protocol = SASL_PLAINTEXT
|
||||
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
kafka.client.zookeeper.principal = zookeeper/hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||
zookeeper.ssl.enable = false
|
||||
sasl.kerberos.service.name = kafka
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
config.storage.topic = connect-configs
|
||||
group.id = connect-cluster
|
||||
status.storage.topic = connect-status
|
||||
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||
internal.key.converter.schemas.enable = false
|
||||
sasl.kerberos.service.name = kafka
|
||||
rest.port = 21010
|
||||
config.storage.replication.factor = 3
|
||||
offset.flush.interval.ms = 10000
|
||||
security.protocol = SASL_PLAINTEXT
|
||||
key.converter.schemas.enable = false
|
||||
internal.key.converter = org.apache.kafka.connect.storage.StringConverter
|
||||
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
status.storage.replication.factor = 3
|
||||
internal.value.converter.schemas.enable = false
|
||||
value.converter.schemas.enable = false
|
||||
internal.value.converter = org.apache.kafka.connect.storage.StringConverter
|
||||
offset.storage.replication.factor = 3
|
||||
offset.storage.topic = connect-offsets
|
||||
value.converter = org.apache.kafka.connect.storage.StringConverter
|
||||
key.converter = org.apache.kafka.connect.storage.StringConverter
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
consumer.sasl.kerberos.service.name = kafka
|
||||
producer.security.protocol = SASL_PLAINTEXT
|
||||
standalone1.key.converter.schemas.enable = false
|
||||
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||
internal.key.converter.schemas.enable = false
|
||||
sasl.kerberos.service.name = kafka
|
||||
offset.flush.interval.ms = 10000
|
||||
security.protocol = SASL_PLAINTEXT
|
||||
internal.key.converter = org.apache.kafka.connect.storage.StringConverter
|
||||
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
offset.storage.file.filename = /tmp/connect.offsets
|
||||
producer.kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
internal.value.converter.schemas.enable = false
|
||||
internal.value.converter = org.apache.kafka.connect.storage.StringConverter
|
||||
value.converter.schemas.enable = false
|
||||
consumer.security.protocol = SASL_PLAINTEXT
|
||||
value.converter = org.apache.kafka.connect.storage.StringConverter
|
||||
key.converter = org.apache.kafka.connect.storage.StringConverter
|
||||
producer.sasl.kerberos.service.name = kafka
|
||||
consumer.kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
security.protocol = SASL_PLAINTEXT
|
||||
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
group.id = example-group1
|
||||
auto.commit.interval.ms = 60000
|
||||
sasl.kerberos.service.name = kafka
|
||||
|
|
@ -0,0 +1 @@
|
|||
cluster.ip.model = IPV4
|
||||
|
|
@ -0,0 +1 @@
|
|||
kafka.client.security.mode = yes
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
[kdcdefaults]
|
||||
kdc_ports = 53.1.213.23:21732
|
||||
kdc_tcp_ports = 53.1.213.23:21732
|
||||
|
||||
[libdefaults]
|
||||
default_realm = A528C942_01A6_1BEF_7A75_0187DC82C40F.COM
|
||||
kdc_timeout = 2500
|
||||
clockskew = 300
|
||||
use_dns_lookup = 0
|
||||
udp_preference_limit = 1465
|
||||
max_retries = 5
|
||||
dns_lookup_kdc = false
|
||||
dns_lookup_realm = false
|
||||
renewable = false
|
||||
forwardable = false
|
||||
renew_lifetime = 0m
|
||||
max_renewable_life = 30m
|
||||
allow_extend_version = false
|
||||
default_ccache_name = FILE:/tmp//krb5cc_%{uid}
|
||||
|
||||
[realms]
|
||||
A528C942_01A6_1BEF_7A75_0187DC82C40F.COM = {
|
||||
kdc = 53.1.213.23:21732
|
||||
kdc = 53.1.213.22:21732
|
||||
admin_server = 53.1.213.22:21730
|
||||
admin_server = 53.1.213.23:21730
|
||||
kpasswd_server = 53.1.213.22:21731
|
||||
kpasswd_server = 53.1.213.23:21731
|
||||
supported_enctypes = aes256-cts-hmac-sha1-96:special aes128-cts-hmac-sha1-96:special
|
||||
kpasswd_port = 21731
|
||||
kadmind_port = 21730
|
||||
kadmind_listen = 53.1.213.23:21730
|
||||
kpasswd_listen = 53.1.213.23:21731
|
||||
renewable = false
|
||||
forwardable = false
|
||||
renew_lifetime = 0m
|
||||
max_renewable_life = 30m
|
||||
acl_file = /opt/huawei/Bigdata/FusionInsight_BASE_8.5.0/install/FusionInsight-kerberos-1.20/kerberos/var/krb5kdc/kadm5.acl
|
||||
dict_file = /opt/huawei/Bigdata/common/runtime0/security/weakPasswdDic/weakPasswdForKdc.ini
|
||||
key_stash_file = /opt/huawei/Bigdata/FusionInsight_BASE_8.5.0/install/FusionInsight-kerberos-1.20/kerberos/var/krb5kdc/.k5.A528C942_01A6_1BEF_7A75_0187DC82C40F.COM
|
||||
}
|
||||
|
||||
[domain_realm]
|
||||
.a528c942_01a6_1bef_7a75_0187dc82c40f.com = A528C942_01A6_1BEF_7A75_0187DC82C40F.COM
|
||||
|
||||
[logging]
|
||||
kdc = SYSLOG:INFO:DAEMON
|
||||
admin_server = SYSLOG:INFO:DAEMON
|
||||
default = SYSLOG:NOTICE:DAEMON
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
security.protocol = SASL_PLAINTEXT
|
||||
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
acks = 1
|
||||
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||
sasl.kerberos.service.name = kafka
|
||||
|
|
@ -0,0 +1,192 @@
|
|||
log.cleaner.min.compaction.lag.ms = 0
|
||||
quota.producer.default = 9223372036854775807
|
||||
metric.reporters = com.huawei.bigdata.kafka.kafkabalancer.reporter.plugin.CoreMetricReporter
|
||||
offsets.topic.num.partitions = 50
|
||||
log.flush.interval.messages = 9223372036854775807
|
||||
controller.socket.timeout.ms = 30000
|
||||
auto.create.topics.enable = true
|
||||
log.flush.interval.ms = 9223372036854775807
|
||||
actual.broker.id.ip.map =
|
||||
listener.name.sasl_plaintext.plain.sasl.server.callback.handler.class = com.huawei.kafka.plain.PlainCallBackHandler
|
||||
replica.socket.receive.buffer.bytes = 65536
|
||||
min.insync.replicas = 1
|
||||
ssl.enable = false
|
||||
replica.fetch.wait.max.ms = 500
|
||||
num.recovery.threads.per.data.dir = 10
|
||||
ssl.keystore.type = JKS
|
||||
super.users = User:kafka
|
||||
sasl.mechanism.inter.broker.protocol = GSSAPI
|
||||
default.replication.factor = 2
|
||||
log.preallocate = false
|
||||
sasl.kerberos.principal.to.local.rules = RULE:[2:$1@$0](.*@.*)s/@.*//,RULE:[1:$1@$0](.*@*.COM)s/@.*//,DEFAULT
|
||||
metrics.reporter.topic.replicas = 3
|
||||
actual.broker.id.port.map =
|
||||
fetch.purgatory.purge.interval.requests = 1000
|
||||
replica.socket.timeout.ms = 30000
|
||||
message.max.bytes = 100001200
|
||||
max.connections.per.user = 2147483647
|
||||
transactional.id.expiration.ms = 604800000
|
||||
control.plane.listener.name = TRACE
|
||||
transaction.state.log.replication.factor = 3
|
||||
num.io.threads = 8
|
||||
monitor.zk.ssl.connect = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002
|
||||
offsets.commit.required.acks = -1
|
||||
log.flush.offset.checkpoint.interval.ms = 60000
|
||||
quota.window.size.seconds = 1
|
||||
delete.topic.enable = true
|
||||
ssl.truststore.type = JKS
|
||||
offsets.commit.timeout.ms = 5000
|
||||
quota.window.num = 11
|
||||
log.partition.strategy = count
|
||||
zookeeper.connect = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002/kafka
|
||||
authorizer.class.name = org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer
|
||||
auto.reassign.check.interval.ms = 600000
|
||||
user.group.cache.timeout.sec = 300
|
||||
auto.reassign.enable = true
|
||||
num.replica.fetchers = 1
|
||||
alter.log.dirs.replication.quota.window.size.seconds = 1
|
||||
allow.everyone.if.no.acl.found = false
|
||||
ip.mode = IPV4
|
||||
alter.log.dirs.replication.quota.window.num = 11
|
||||
log.roll.jitter.hours = 0
|
||||
tmp.zookeeper.connect = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002
|
||||
log.cleaner.enable = true
|
||||
offsets.load.buffer.size = 5242880
|
||||
log.cleaner.delete.retention.ms = 86400000
|
||||
ssl.client.auth = none
|
||||
controlled.shutdown.max.retries = 3
|
||||
queued.max.requests = 500
|
||||
metrics.reporter.max.request.size = 104857600
|
||||
offsets.topic.replication.factor = 3
|
||||
log.cleaner.threads = 1
|
||||
transaction.state.log.min.isr = 2
|
||||
sasl.kerberos.service.name = kafka
|
||||
sasl.kerberos.ticket.renew.jitter = 0.05
|
||||
socket.request.max.bytes = 104857600
|
||||
zookeeper.session.timeout.ms = 45000
|
||||
log.retention.bytes = -1
|
||||
log.message.timestamp.type = CreateTime
|
||||
request.total.time.ms.threshold = 30000
|
||||
sasl.kerberos.min.time.before.relogin = 60000
|
||||
zookeeper.set.acl = true
|
||||
connections.max.idle.ms = 600000
|
||||
offsets.retention.minutes = 10080
|
||||
delegation.token.expiry.time.ms = 86400000
|
||||
max.connections = 2147483647
|
||||
is.security.mode = yes
|
||||
transaction.state.log.num.partitions = 50
|
||||
inter.broker.protocol.version = 3.6-IV1
|
||||
replica.fetch.backoff.ms = 1000
|
||||
kafka.metrics.reporters = com.huawei.kafka.PartitionStatusReporter
|
||||
listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,TRACE:SASL_PLAINTEXT
|
||||
log.retention.hours = 168
|
||||
num.partitions = 2
|
||||
listeners = SASL_PLAINTEXT://53.1.213.25:21007,PLAINTEXT://53.1.213.25:21005,SSL://53.1.213.25:21008,SASL_SSL://53.1.213.25:21009,TRACE://53.1.213.25:21013
|
||||
ssl.enabled.protocols = TLSv1.2
|
||||
delete.records.purgatory.purge.interval.requests = 1
|
||||
monitor.zk.normal.connect = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002
|
||||
ssl.cipher.suites = TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
|
||||
log.flush.scheduler.interval.ms = 9223372036854775807
|
||||
sasl.port = 21007
|
||||
ssl.mode.enable = true
|
||||
security.protocol = SASL_PLAINTEXT
|
||||
log.index.size.max.bytes = 10485760
|
||||
rack.aware.enable = false
|
||||
security.inter.broker.protocol = SASL_PLAINTEXT
|
||||
replica.fetch.max.bytes = 104857600
|
||||
log.cleaner.dedupe.buffer.size = 134217728
|
||||
replica.high.watermark.checkpoint.interval.ms = 5000
|
||||
replication.quota.window.size.seconds = 1
|
||||
log.cleaner.io.buffer.size = 524288
|
||||
sasl.kerberos.ticket.renew.window.factor = 0.8
|
||||
metrics.reporter.zookeeper.url = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002/kafka
|
||||
max.connections.per.user.enable = true
|
||||
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||
metrics.reporter.sasl.kerberos.service.name = kafka
|
||||
zookeeper.connection.timeout.ms = 45000
|
||||
metrics.recording.level = INFO
|
||||
metrics.reporter.bootstrap.servers = 53.1.213.27:21009,53.1.213.26:21009,53.1.213.25:21009
|
||||
controlled.shutdown.retry.backoff.ms = 5000
|
||||
sasl-ssl.port = 21009
|
||||
advertised.broker.id.port.map =
|
||||
listener.name.sasl_ssl.plain.sasl.server.callback.handler.class = com.huawei.kafka.plain.PlainCallBackHandler
|
||||
log.roll.hours = 168
|
||||
log.cleanup.policy = delete
|
||||
log.flush.start.offset.checkpoint.interval.ms = 60000
|
||||
host.name = 53.1.213.25
|
||||
max.connections.per.user.overrides =
|
||||
max.connections.per.user.whitelist = kafka,default#principal
|
||||
transaction.state.log.segment.bytes = 104857600
|
||||
max.connections.per.ip = 2147483647
|
||||
offsets.topic.segment.bytes = 104857600
|
||||
background.threads = 10
|
||||
quota.consumer.default = 9223372036854775807
|
||||
request.timeout.ms = 30000
|
||||
log.message.format.version = 3.6-IV1
|
||||
group.initial.rebalance.delay.ms = 3000
|
||||
log.index.interval.bytes = 4096
|
||||
log.segment.bytes = 1073741824
|
||||
log.cleaner.backoff.ms = 15000
|
||||
kafka.zookeeper.root = /kafka
|
||||
offset.metadata.max.bytes = 4096
|
||||
ssl.truststore.location = #{conf_dir}/truststore.jks
|
||||
group.max.session.timeout.ms = 1800000
|
||||
replica.fetch.response.max.bytes = 104857600
|
||||
port = 21005
|
||||
zookeeper.sync.time.ms = 2000
|
||||
log.segment.delete.delay.ms = 60000
|
||||
ssl.port = 21008
|
||||
fetch.max.bytes = 115343360
|
||||
user.group.query.retry.backoff.ms = 300
|
||||
log.dirs = /srv/BigData/kafka/data1/kafka-logs,/srv/BigData/kafka/data2/kafka-logs,/srv/BigData/kafka/data3/kafka-logs,/srv/BigData/kafka/data4/kafka-logs
|
||||
monitor.keytab = /opt/huawei/Bigdata/om-agent/nodeagent/etc/agent/omm.keytab
|
||||
controlled.shutdown.enable = true
|
||||
az.aware.enable = false
|
||||
compression.type = producer
|
||||
max.connections.per.ip.overrides =
|
||||
log.message.timestamp.difference.max.ms = 9223372036854775807
|
||||
metrics.reporter.kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
kafka.metrics.polling.interval.secs = 60
|
||||
advertised.listeners.protocol = SASL_SSL
|
||||
sasl.kerberos.kinit.cmd = /opt/huawei/Bigdata/FusionInsight_BASE_8.5.0/install/FusionInsight-kerberos-1.20/kerberos/bin/kinit
|
||||
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
|
||||
log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
|
||||
auto.leader.rebalance.enable = true
|
||||
leader.imbalance.check.interval.seconds = 3600
|
||||
log.cleaner.min.cleanable.ratio = 0.5
|
||||
user.group.query.retry = 10
|
||||
replica.lag.time.max.ms = 60000
|
||||
max.incremental.fetch.session.cache.slots = 1000
|
||||
delegation.token.master.key = null
|
||||
num.network.threads = 6
|
||||
reserved.broker.max.id = 65535
|
||||
listener.name.external_sasl_plaintext.plain.sasl.server.callback.handler.class = com.huawei.kafka.plain.PlainCallBackHandler
|
||||
monitor.principal = oms/manager@A528C942_01A6_1BEF_7A75_0187DC82C40F.COM
|
||||
transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
|
||||
socket.send.buffer.bytes = 1024000
|
||||
log.message.downconversion.enable = true
|
||||
advertised.broker.id.ip.map =
|
||||
metrics.reporter.security.protocol = SASL_SSL
|
||||
transaction.state.log.load.buffer.size = 5242880
|
||||
socket.receive.buffer.bytes = 1024000
|
||||
ssl.keystore.location = #{conf_dir}/kafka_broker.jks
|
||||
replica.fetch.min.bytes = 1
|
||||
broker.rack = /default/rack0
|
||||
controller.port = 21013
|
||||
unclean.leader.election.enable = false
|
||||
sasl.enabled.mechanisms = GSSAPI,PLAIN
|
||||
group.min.session.timeout.ms = 6000
|
||||
offsets.retention.check.interval.ms = 600000
|
||||
log.cleaner.io.buffer.load.factor = 0.9
|
||||
transaction.max.timeout.ms = 900000
|
||||
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
producer.purgatory.purge.interval.requests = 1000
|
||||
group.max.size = 2147483647
|
||||
broker.id = 1
|
||||
offsets.topic.compression.codec = 0
|
||||
delegation.token.max.lifetime.ms = 604800000
|
||||
replication.quota.window.num = 11
|
||||
enable.advertised.listener = false
|
||||
log.retention.check.interval.ms = 300000
|
||||
leader.imbalance.per.broker.percentage = 10
|
||||
queued.max.request.bytes = -1
|
||||
Binary file not shown.
|
|
@ -0,0 +1,21 @@
|
|||
config.storage.topic = connect-configs
|
||||
group.id = connect-cluster
|
||||
status.storage.topic = connect-status
|
||||
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||
internal.key.converter.schemas.enable = false
|
||||
sasl.kerberos.service.name = kafka
|
||||
rest.port = 21010
|
||||
config.storage.replication.factor = 3
|
||||
offset.flush.interval.ms = 10000
|
||||
security.protocol = SASL_PLAINTEXT
|
||||
key.converter.schemas.enable = false
|
||||
internal.key.converter = org.apache.kafka.connect.storage.StringConverter
|
||||
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
status.storage.replication.factor = 3
|
||||
internal.value.converter.schemas.enable = false
|
||||
value.converter.schemas.enable = false
|
||||
internal.value.converter = org.apache.kafka.connect.storage.StringConverter
|
||||
offset.storage.replication.factor = 3
|
||||
offset.storage.topic = connect-offsets
|
||||
value.converter = org.apache.kafka.connect.storage.StringConverter
|
||||
key.converter = org.apache.kafka.connect.storage.StringConverter
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
consumer.sasl.kerberos.service.name = kafka
|
||||
producer.security.protocol = SASL_PLAINTEXT
|
||||
standalone1.key.converter.schemas.enable = false
|
||||
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||
internal.key.converter.schemas.enable = false
|
||||
sasl.kerberos.service.name = kafka
|
||||
offset.flush.interval.ms = 10000
|
||||
security.protocol = SASL_PLAINTEXT
|
||||
internal.key.converter = org.apache.kafka.connect.storage.StringConverter
|
||||
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
offset.storage.file.filename = /tmp/connect.offsets
|
||||
producer.kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
internal.value.converter.schemas.enable = false
|
||||
internal.value.converter = org.apache.kafka.connect.storage.StringConverter
|
||||
value.converter.schemas.enable = false
|
||||
consumer.security.protocol = SASL_PLAINTEXT
|
||||
value.converter = org.apache.kafka.connect.storage.StringConverter
|
||||
key.converter = org.apache.kafka.connect.storage.StringConverter
|
||||
producer.sasl.kerberos.service.name = kafka
|
||||
consumer.kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
security.protocol = SASL_PLAINTEXT
|
||||
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
group.id = example-group1
|
||||
auto.commit.interval.ms = 60000
|
||||
sasl.kerberos.service.name = kafka
|
||||
|
|
@ -0,0 +1 @@
|
|||
cluster.ip.model = IPV4
|
||||
|
|
@ -0,0 +1 @@
|
|||
kafka.client.security.mode = yes
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
[kdcdefaults]
|
||||
kdc_ports = 53.1.213.23:21732
|
||||
kdc_tcp_ports = 53.1.213.23:21732
|
||||
|
||||
[libdefaults]
|
||||
default_realm = A528C942_01A6_1BEF_7A75_0187DC82C40F.COM
|
||||
kdc_timeout = 2500
|
||||
clockskew = 300
|
||||
use_dns_lookup = 0
|
||||
udp_preference_limit = 1465
|
||||
max_retries = 5
|
||||
dns_lookup_kdc = false
|
||||
dns_lookup_realm = false
|
||||
renewable = false
|
||||
forwardable = false
|
||||
renew_lifetime = 0m
|
||||
max_renewable_life = 30m
|
||||
allow_extend_version = false
|
||||
default_ccache_name = FILE:/tmp//krb5cc_%{uid}
|
||||
|
||||
[realms]
|
||||
A528C942_01A6_1BEF_7A75_0187DC82C40F.COM = {
|
||||
kdc = 53.1.213.23:21732
|
||||
kdc = 53.1.213.22:21732
|
||||
admin_server = 53.1.213.22:21730
|
||||
admin_server = 53.1.213.23:21730
|
||||
kpasswd_server = 53.1.213.22:21731
|
||||
kpasswd_server = 53.1.213.23:21731
|
||||
supported_enctypes = aes256-cts-hmac-sha1-96:special aes128-cts-hmac-sha1-96:special
|
||||
kpasswd_port = 21731
|
||||
kadmind_port = 21730
|
||||
kadmind_listen = 53.1.213.23:21730
|
||||
kpasswd_listen = 53.1.213.23:21731
|
||||
renewable = false
|
||||
forwardable = false
|
||||
renew_lifetime = 0m
|
||||
max_renewable_life = 30m
|
||||
acl_file = /opt/huawei/Bigdata/FusionInsight_BASE_8.5.0/install/FusionInsight-kerberos-1.20/kerberos/var/krb5kdc/kadm5.acl
|
||||
dict_file = /opt/huawei/Bigdata/common/runtime0/security/weakPasswdDic/weakPasswdForKdc.ini
|
||||
key_stash_file = /opt/huawei/Bigdata/FusionInsight_BASE_8.5.0/install/FusionInsight-kerberos-1.20/kerberos/var/krb5kdc/.k5.A528C942_01A6_1BEF_7A75_0187DC82C40F.COM
|
||||
}
|
||||
|
||||
[domain_realm]
|
||||
.a528c942_01a6_1bef_7a75_0187dc82c40f.com = A528C942_01A6_1BEF_7A75_0187DC82C40F.COM
|
||||
|
||||
[logging]
|
||||
kdc = SYSLOG:INFO:DAEMON
|
||||
admin_server = SYSLOG:INFO:DAEMON
|
||||
default = SYSLOG:NOTICE:DAEMON
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<configuration scan="true" scanPeriod="60 seconds" debug="false">
|
||||
<!-- 日志存放路径 -->
|
||||
<property name="log.path" value="logs" />
|
||||
<property name="log.file" value="data2stKafka" />
|
||||
<property name="MAX_FILE_SIZE" value="50MB" />
|
||||
<property name="MAX_HISTORY" value="30" />
|
||||
<!-- 日志输出格式 -->
|
||||
<!-- INFO日志Appender -->
|
||||
<appender name="FILE_INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.path}/info.${log.file}.log</file>
|
||||
<filter class="ch.qos.logback.classic.filter.LevelFilter">
|
||||
<level>INFO</level>
|
||||
<onMatch>ACCEPT</onMatch>
|
||||
<onMismatch>DENY</onMismatch>
|
||||
</filter>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.path}/info/info.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
|
||||
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
|
||||
<maxHistory>${MAX_HISTORY}</maxHistory>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<!-- ERROR日志Appender -->
|
||||
<appender name="FILE_ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.path}/error.${log.file}.log</file>
|
||||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||
<level>ERROR</level>
|
||||
</filter>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.path}/error/error.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
|
||||
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
|
||||
<maxHistory>${MAX_HISTORY}</maxHistory>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<!-- 根Logger配置(禁用控制台输出) -->
|
||||
<root level="INFO">
|
||||
<appender-ref ref="FILE_INFO" />
|
||||
<appender-ref ref="FILE_ERROR" />
|
||||
</root>
|
||||
|
||||
</configuration>
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
security.protocol = SASL_PLAINTEXT
|
||||
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
acks = 1
|
||||
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||
sasl.kerberos.service.name = kafka
|
||||
|
|
@ -0,0 +1,192 @@
|
|||
log.cleaner.min.compaction.lag.ms = 0
|
||||
quota.producer.default = 9223372036854775807
|
||||
metric.reporters = com.huawei.bigdata.kafka.kafkabalancer.reporter.plugin.CoreMetricReporter
|
||||
offsets.topic.num.partitions = 50
|
||||
log.flush.interval.messages = 9223372036854775807
|
||||
controller.socket.timeout.ms = 30000
|
||||
auto.create.topics.enable = true
|
||||
log.flush.interval.ms = 9223372036854775807
|
||||
actual.broker.id.ip.map =
|
||||
listener.name.sasl_plaintext.plain.sasl.server.callback.handler.class = com.huawei.kafka.plain.PlainCallBackHandler
|
||||
replica.socket.receive.buffer.bytes = 65536
|
||||
min.insync.replicas = 1
|
||||
ssl.enable = false
|
||||
replica.fetch.wait.max.ms = 500
|
||||
num.recovery.threads.per.data.dir = 10
|
||||
ssl.keystore.type = JKS
|
||||
super.users = User:kafka
|
||||
sasl.mechanism.inter.broker.protocol = GSSAPI
|
||||
default.replication.factor = 2
|
||||
log.preallocate = false
|
||||
sasl.kerberos.principal.to.local.rules = RULE:[2:$1@$0](.*@.*)s/@.*//,RULE:[1:$1@$0](.*@*.COM)s/@.*//,DEFAULT
|
||||
metrics.reporter.topic.replicas = 3
|
||||
actual.broker.id.port.map =
|
||||
fetch.purgatory.purge.interval.requests = 1000
|
||||
replica.socket.timeout.ms = 30000
|
||||
message.max.bytes = 100001200
|
||||
max.connections.per.user = 2147483647
|
||||
transactional.id.expiration.ms = 604800000
|
||||
control.plane.listener.name = TRACE
|
||||
transaction.state.log.replication.factor = 3
|
||||
num.io.threads = 8
|
||||
monitor.zk.ssl.connect = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002
|
||||
offsets.commit.required.acks = -1
|
||||
log.flush.offset.checkpoint.interval.ms = 60000
|
||||
quota.window.size.seconds = 1
|
||||
delete.topic.enable = true
|
||||
ssl.truststore.type = JKS
|
||||
offsets.commit.timeout.ms = 5000
|
||||
quota.window.num = 11
|
||||
log.partition.strategy = count
|
||||
zookeeper.connect = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002/kafka
|
||||
authorizer.class.name = org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer
|
||||
auto.reassign.check.interval.ms = 600000
|
||||
user.group.cache.timeout.sec = 300
|
||||
auto.reassign.enable = true
|
||||
num.replica.fetchers = 1
|
||||
alter.log.dirs.replication.quota.window.size.seconds = 1
|
||||
allow.everyone.if.no.acl.found = false
|
||||
ip.mode = IPV4
|
||||
alter.log.dirs.replication.quota.window.num = 11
|
||||
log.roll.jitter.hours = 0
|
||||
tmp.zookeeper.connect = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002
|
||||
log.cleaner.enable = true
|
||||
offsets.load.buffer.size = 5242880
|
||||
log.cleaner.delete.retention.ms = 86400000
|
||||
ssl.client.auth = none
|
||||
controlled.shutdown.max.retries = 3
|
||||
queued.max.requests = 500
|
||||
metrics.reporter.max.request.size = 104857600
|
||||
offsets.topic.replication.factor = 3
|
||||
log.cleaner.threads = 1
|
||||
transaction.state.log.min.isr = 2
|
||||
sasl.kerberos.service.name = kafka
|
||||
sasl.kerberos.ticket.renew.jitter = 0.05
|
||||
socket.request.max.bytes = 104857600
|
||||
zookeeper.session.timeout.ms = 45000
|
||||
log.retention.bytes = -1
|
||||
log.message.timestamp.type = CreateTime
|
||||
request.total.time.ms.threshold = 30000
|
||||
sasl.kerberos.min.time.before.relogin = 60000
|
||||
zookeeper.set.acl = true
|
||||
connections.max.idle.ms = 600000
|
||||
offsets.retention.minutes = 10080
|
||||
delegation.token.expiry.time.ms = 86400000
|
||||
max.connections = 2147483647
|
||||
is.security.mode = yes
|
||||
transaction.state.log.num.partitions = 50
|
||||
inter.broker.protocol.version = 3.6-IV1
|
||||
replica.fetch.backoff.ms = 1000
|
||||
kafka.metrics.reporters = com.huawei.kafka.PartitionStatusReporter
|
||||
listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,TRACE:SASL_PLAINTEXT
|
||||
log.retention.hours = 168
|
||||
num.partitions = 2
|
||||
listeners = SASL_PLAINTEXT://53.1.213.25:21007,PLAINTEXT://53.1.213.25:21005,SSL://53.1.213.25:21008,SASL_SSL://53.1.213.25:21009,TRACE://53.1.213.25:21013
|
||||
ssl.enabled.protocols = TLSv1.2
|
||||
delete.records.purgatory.purge.interval.requests = 1
|
||||
monitor.zk.normal.connect = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002
|
||||
ssl.cipher.suites = TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
|
||||
log.flush.scheduler.interval.ms = 9223372036854775807
|
||||
sasl.port = 21007
|
||||
ssl.mode.enable = true
|
||||
security.protocol = SASL_PLAINTEXT
|
||||
log.index.size.max.bytes = 10485760
|
||||
rack.aware.enable = false
|
||||
security.inter.broker.protocol = SASL_PLAINTEXT
|
||||
replica.fetch.max.bytes = 104857600
|
||||
log.cleaner.dedupe.buffer.size = 134217728
|
||||
replica.high.watermark.checkpoint.interval.ms = 5000
|
||||
replication.quota.window.size.seconds = 1
|
||||
log.cleaner.io.buffer.size = 524288
|
||||
sasl.kerberos.ticket.renew.window.factor = 0.8
|
||||
metrics.reporter.zookeeper.url = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002/kafka
|
||||
max.connections.per.user.enable = true
|
||||
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||
metrics.reporter.sasl.kerberos.service.name = kafka
|
||||
zookeeper.connection.timeout.ms = 45000
|
||||
metrics.recording.level = INFO
|
||||
metrics.reporter.bootstrap.servers = 53.1.213.27:21009,53.1.213.26:21009,53.1.213.25:21009
|
||||
controlled.shutdown.retry.backoff.ms = 5000
|
||||
sasl-ssl.port = 21009
|
||||
advertised.broker.id.port.map =
|
||||
listener.name.sasl_ssl.plain.sasl.server.callback.handler.class = com.huawei.kafka.plain.PlainCallBackHandler
|
||||
log.roll.hours = 168
|
||||
log.cleanup.policy = delete
|
||||
log.flush.start.offset.checkpoint.interval.ms = 60000
|
||||
host.name = 53.1.213.25
|
||||
max.connections.per.user.overrides =
|
||||
max.connections.per.user.whitelist = kafka,default#principal
|
||||
transaction.state.log.segment.bytes = 104857600
|
||||
max.connections.per.ip = 2147483647
|
||||
offsets.topic.segment.bytes = 104857600
|
||||
background.threads = 10
|
||||
quota.consumer.default = 9223372036854775807
|
||||
request.timeout.ms = 30000
|
||||
log.message.format.version = 3.6-IV1
|
||||
group.initial.rebalance.delay.ms = 3000
|
||||
log.index.interval.bytes = 4096
|
||||
log.segment.bytes = 1073741824
|
||||
log.cleaner.backoff.ms = 15000
|
||||
kafka.zookeeper.root = /kafka
|
||||
offset.metadata.max.bytes = 4096
|
||||
ssl.truststore.location = #{conf_dir}/truststore.jks
|
||||
group.max.session.timeout.ms = 1800000
|
||||
replica.fetch.response.max.bytes = 104857600
|
||||
port = 21005
|
||||
zookeeper.sync.time.ms = 2000
|
||||
log.segment.delete.delay.ms = 60000
|
||||
ssl.port = 21008
|
||||
fetch.max.bytes = 115343360
|
||||
user.group.query.retry.backoff.ms = 300
|
||||
log.dirs = /srv/BigData/kafka/data1/kafka-logs,/srv/BigData/kafka/data2/kafka-logs,/srv/BigData/kafka/data3/kafka-logs,/srv/BigData/kafka/data4/kafka-logs
|
||||
monitor.keytab = /opt/huawei/Bigdata/om-agent/nodeagent/etc/agent/omm.keytab
|
||||
controlled.shutdown.enable = true
|
||||
az.aware.enable = false
|
||||
compression.type = producer
|
||||
max.connections.per.ip.overrides =
|
||||
log.message.timestamp.difference.max.ms = 9223372036854775807
|
||||
metrics.reporter.kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
kafka.metrics.polling.interval.secs = 60
|
||||
advertised.listeners.protocol = SASL_SSL
|
||||
sasl.kerberos.kinit.cmd = /opt/huawei/Bigdata/FusionInsight_BASE_8.5.0/install/FusionInsight-kerberos-1.20/kerberos/bin/kinit
|
||||
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
|
||||
log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
|
||||
auto.leader.rebalance.enable = true
|
||||
leader.imbalance.check.interval.seconds = 3600
|
||||
log.cleaner.min.cleanable.ratio = 0.5
|
||||
user.group.query.retry = 10
|
||||
replica.lag.time.max.ms = 60000
|
||||
max.incremental.fetch.session.cache.slots = 1000
|
||||
delegation.token.master.key = null
|
||||
num.network.threads = 6
|
||||
reserved.broker.max.id = 65535
|
||||
listener.name.external_sasl_plaintext.plain.sasl.server.callback.handler.class = com.huawei.kafka.plain.PlainCallBackHandler
|
||||
monitor.principal = oms/manager@A528C942_01A6_1BEF_7A75_0187DC82C40F.COM
|
||||
transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
|
||||
socket.send.buffer.bytes = 1024000
|
||||
log.message.downconversion.enable = true
|
||||
advertised.broker.id.ip.map =
|
||||
metrics.reporter.security.protocol = SASL_SSL
|
||||
transaction.state.log.load.buffer.size = 5242880
|
||||
socket.receive.buffer.bytes = 1024000
|
||||
ssl.keystore.location = #{conf_dir}/kafka_broker.jks
|
||||
replica.fetch.min.bytes = 1
|
||||
broker.rack = /default/rack0
|
||||
controller.port = 21013
|
||||
unclean.leader.election.enable = false
|
||||
sasl.enabled.mechanisms = GSSAPI,PLAIN
|
||||
group.min.session.timeout.ms = 6000
|
||||
offsets.retention.check.interval.ms = 600000
|
||||
log.cleaner.io.buffer.load.factor = 0.9
|
||||
transaction.max.timeout.ms = 900000
|
||||
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||
producer.purgatory.purge.interval.requests = 1000
|
||||
group.max.size = 2147483647
|
||||
broker.id = 1
|
||||
offsets.topic.compression.codec = 0
|
||||
delegation.token.max.lifetime.ms = 604800000
|
||||
replication.quota.window.num = 11
|
||||
enable.advertised.listener = false
|
||||
log.retention.check.interval.ms = 300000
|
||||
leader.imbalance.per.broker.percentage = 10
|
||||
queued.max.request.bytes = -1
|
||||
Binary file not shown.
|
|
@ -53,6 +53,11 @@
|
|||
<artifactId>stwzhj-common-mybatis</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-common-job</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.dromara</groupId>
|
||||
<artifactId>stwzhj-common-dubbo</artifactId>
|
||||
|
|
@ -114,46 +119,77 @@
|
|||
<artifactId>stwzhj-api-data2es</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!--elasticsearch-->
|
||||
<dependency>
|
||||
<groupId>org.elasticsearch</groupId>
|
||||
<artifactId>elasticsearch</artifactId>
|
||||
<version>7.14.0</version>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<artifactId>log4j-api</artifactId>
|
||||
<groupId>org.apache.logging.log4j</groupId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.elasticsearch.client</groupId>
|
||||
<artifactId>elasticsearch-rest-client</artifactId>
|
||||
<version>7.14.0</version>
|
||||
<version>7.10.2-h0.cbu.mrs.350.r11</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.elasticsearch.client</groupId>
|
||||
<artifactId>elasticsearch-rest-high-level-client</artifactId>
|
||||
<version>7.14.0</version>
|
||||
<version>7.10.2-h0.cbu.mrs.350.r11</version>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>org.elasticsearch</groupId>
|
||||
<artifactId>elasticsearch</artifactId>
|
||||
<groupId>org.elasticsearch.plugin</groupId>
|
||||
<artifactId>parent-join-client</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<artifactId>elasticsearch-rest-client</artifactId>
|
||||
<groupId>org.elasticsearch.client</groupId>
|
||||
<groupId>org.elasticsearch.plugin</groupId>
|
||||
<artifactId>aggs-matrix-stats-client</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
|
||||
|
||||
<!-- kafka -->
|
||||
<dependency>
|
||||
<groupId>org.springframework.kafka</groupId>
|
||||
<artifactId>spring-kafka</artifactId>
|
||||
<groupId>org.elasticsearch</groupId>
|
||||
<artifactId>elasticsearch</artifactId>
|
||||
<version>7.10.2-h0.cbu.mrs.350.r11</version>
|
||||
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.12</artifactId>
|
||||
<version>3.6.1-h0.cbu.mrs.350.r11</version>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>org.apache.zookeeper</groupId>
|
||||
<artifactId>zookeeper</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>net.sf.jopt-simple</groupId>
|
||||
<artifactId>jopt-simple</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>com.huawei.mrs</groupId>
|
||||
<artifactId>manager-wc2frm</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.xerial.snappy</groupId>
|
||||
<artifactId>snappy-java</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>com.huawei.mrs</groupId>
|
||||
<artifactId>om-controller-api</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>com.101tec</groupId>
|
||||
<artifactId>zkclient</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
<version>3.6.1-h0.cbu.mrs.350.r11</version>
|
||||
</dependency>
|
||||
|
||||
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
|
|
|
|||
|
|
@ -20,10 +20,11 @@ import java.util.List;
|
|||
/**
|
||||
* restHighLevelClient 客户端配置类
|
||||
*/
|
||||
@Slf4j
|
||||
|
||||
/*@Slf4j
|
||||
@Data
|
||||
@Configuration
|
||||
@ConfigurationProperties(prefix = "elasticsearch")
|
||||
@ConfigurationProperties(prefix = "elasticsearch")*/
|
||||
public class ElasticsearchConfig {
|
||||
|
||||
// es host ip 地址(集群)
|
||||
|
|
@ -85,7 +86,7 @@ public class ElasticsearchConfig {
|
|||
});
|
||||
restHighLevelClient = new RestHighLevelClient(builder);
|
||||
} catch (NumberFormatException e) {
|
||||
log.error("ES 连接池初始化异常");
|
||||
// log.error("ES 连接池初始化异常");
|
||||
}
|
||||
return restHighLevelClient;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,40 @@
|
|||
package org.dromara.data2es.config;
|
||||
|
||||
import org.dromara.data2es.util.GenerateEnumUtil;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.hwclient.HwRestClient;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import java.time.LocalDate;
|
||||
import java.time.format.DateTimeFormatter;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2021-07-05 18:22
|
||||
*/
|
||||
@Component(value = "esConfig")
|
||||
public class EsConfig {
|
||||
|
||||
private String prefix = "gpsinfo";
|
||||
|
||||
public String indexNameByDay(){
|
||||
return prefix+ LocalDate.now().format(DateTimeFormatter.ofPattern("yyyyMMdd"));
|
||||
}
|
||||
|
||||
@Bean(destroyMethod = "close",name = "restHighLevelClient")
|
||||
public RestHighLevelClient restClient() {
|
||||
// String configPath = System.getProperty("user.dir") + File.separator+ "app_data2es_aq" + File.separator + "conf" + File.separator;
|
||||
String configPath = "/rsoft/config/";
|
||||
|
||||
// KAFKA("KafkaClient"), ZOOKEEPER("Client");
|
||||
// GenerateEnumUtil.addEnum(LoginUtil.Module.class,"KAFKA","KafkaClient");
|
||||
// GenerateEnumUtil.addEnum(LoginUtil.Module.class,"ZOOKEEPER","Client");
|
||||
HwRestClient hwRestClient = new HwRestClient(configPath);
|
||||
RestHighLevelClient highLevelClient = new RestHighLevelClient(hwRestClient.getRestClientBuilder());
|
||||
return highLevelClient;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,227 @@
|
|||
package org.dromara.data2es.config;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.http.Header;
|
||||
import org.apache.http.HttpHost;
|
||||
import org.apache.http.client.config.RequestConfig;
|
||||
import org.apache.http.message.BasicHeader;
|
||||
import org.elasticsearch.client.Node;
|
||||
import org.elasticsearch.client.RestClient;
|
||||
import org.elasticsearch.client.RestClientBuilder;
|
||||
import org.springframework.core.io.ClassPathResource;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Properties;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2021-11-01 14:25
|
||||
*/
|
||||
public class HwRestClient {
|
||||
private static final Log LOG = LogFactory.getLog(HwRestClient.class);
|
||||
private static String isSecureMode;
|
||||
private static String esServerHost;
|
||||
private static int connectTimeout;
|
||||
private static int socketTimeout;
|
||||
private static int connectionRequestTimeout;
|
||||
private static int maxConnTotal;
|
||||
private static int maxConnPerRoute;
|
||||
private static String principal;
|
||||
private static final String UN_SECURITY_MODE = "false";
|
||||
private static final String COLON = ":";
|
||||
private static final String COMMA = ",";
|
||||
private static HttpHost[] hostArray;
|
||||
private String configPath;
|
||||
private static boolean SNIFFER_ENABLE = false;
|
||||
|
||||
private static final String username = "yhy_ahrs_rcw@HADOOP.COM";
|
||||
|
||||
public static final String password = "Ycgis!2509";
|
||||
|
||||
public HwRestClient(String configPath) {
|
||||
this.configPath = configPath;
|
||||
if (!this.getConfig()) {
|
||||
LOG.error("Get config failed.");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public HwRestClient() {
|
||||
/*this.configPath = System.getProperty("user.dir") + File.separator + "conf" + File.separator;
|
||||
if (!this.getConfig()) {
|
||||
LOG.error("Get config failed.");
|
||||
}*/
|
||||
if (!this.getConfig()) {
|
||||
LOG.error("Get config failed.");
|
||||
}
|
||||
this.configPath = "/rsoft/config/";
|
||||
}
|
||||
|
||||
private boolean getConfig() {
|
||||
Properties properties = new Properties();
|
||||
ClassPathResource classPathResource = new ClassPathResource("/esParams.properties");
|
||||
|
||||
try {
|
||||
properties.load(classPathResource.getInputStream());
|
||||
} catch (IOException var5) {
|
||||
LOG.error("Failed to load properties file : " + var5.getMessage());
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
esServerHost = properties.getProperty("esServerHost");
|
||||
connectTimeout = Integer.valueOf(properties.getProperty("connectTimeout"));
|
||||
socketTimeout = Integer.valueOf(properties.getProperty("socketTimeout"));
|
||||
connectionRequestTimeout = Integer.valueOf(properties.getProperty("connectionRequestTimeout"));
|
||||
maxConnPerRoute = Integer.valueOf(properties.getProperty("maxConnPerRoute"));
|
||||
maxConnTotal = Integer.valueOf(properties.getProperty("maxConnTotal"));
|
||||
isSecureMode = properties.getProperty("isSecureMode");
|
||||
principal = properties.getProperty("principal");
|
||||
SNIFFER_ENABLE = Boolean.valueOf(properties.getProperty("snifferEnable"));
|
||||
LOG.info("esServerHost:" + esServerHost);
|
||||
LOG.info("connectTimeout:" + connectTimeout);
|
||||
LOG.info("socketTimeout:" + socketTimeout);
|
||||
LOG.info("connectionRequestTimeout:" + connectionRequestTimeout);
|
||||
LOG.info("maxConnPerRouteTotal:" + maxConnPerRoute);
|
||||
LOG.info("maxConnTotal:" + maxConnTotal);
|
||||
LOG.info("isSecureMode:" + isSecureMode);
|
||||
LOG.info("principal:" + principal);
|
||||
return true;
|
||||
} catch (NumberFormatException var4) {
|
||||
LOG.error("Failed to get parameters !", var4);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
public boolean isSnifferEnable() {
|
||||
return SNIFFER_ENABLE;
|
||||
}
|
||||
|
||||
private HttpHost[] getHostArray() {
|
||||
String schema;
|
||||
if ("false".equals(isSecureMode)) {
|
||||
schema = "http";
|
||||
} else {
|
||||
schema = "https";
|
||||
}
|
||||
|
||||
List<HttpHost> hosts = new ArrayList();
|
||||
String[] hostArray1 = esServerHost.split(",");
|
||||
String[] var4 = hostArray1;
|
||||
int var5 = hostArray1.length;
|
||||
|
||||
for (int var6 = 0; var6 < var5; ++var6) {
|
||||
String host = var4[var6];
|
||||
String[] ipPort = host.split(":");
|
||||
HttpHost hostNew = new HttpHost(ipPort[0], Integer.valueOf(ipPort[1]), schema);
|
||||
hosts.add(hostNew);
|
||||
}
|
||||
|
||||
return (HttpHost[]) hosts.toArray(new HttpHost[0]);
|
||||
}
|
||||
|
||||
private void setSecConfig() {
|
||||
try {
|
||||
LOG.info("Config path is " + this.configPath);
|
||||
LoginUtil.setJaasFile(principal, this.configPath + "user.keytab");
|
||||
LoginUtil.setKrb5Config(this.configPath + "krb5.conf");
|
||||
System.setProperty("elasticsearch.kerberos.jaas.appname", "EsClient");
|
||||
System.setProperty("es.security.indication", "true");
|
||||
LOG.info("es.security.indication is " + System.getProperty("es.security.indication"));
|
||||
} catch (Exception var2) {
|
||||
LOG.error("Failed to set security conf", var2);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public RestClientBuilder getRestClientBuilder() {
|
||||
hostArray = getHostArray();
|
||||
if ("false".equals(isSecureMode)) {
|
||||
System.setProperty("es.security.indication", "false");
|
||||
} else {
|
||||
setSecConfig();
|
||||
}
|
||||
|
||||
RestClientBuilder builder = RestClient.builder(hostArray);
|
||||
Header[] defaultHeaders = new Header[]{new BasicHeader("Accept", "application/json"), new BasicHeader("Content-type", "application/json")};
|
||||
builder.setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
|
||||
@Override
|
||||
public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
|
||||
|
||||
return requestConfigBuilder.setConnectTimeout(HwRestClient.connectTimeout).
|
||||
setSocketTimeout(HwRestClient.socketTimeout).
|
||||
setConnectionRequestTimeout(HwRestClient.connectionRequestTimeout);
|
||||
}
|
||||
});
|
||||
builder.setDefaultHeaders(defaultHeaders);
|
||||
|
||||
/* es https */
|
||||
/*try {
|
||||
final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
|
||||
credentialsProvider.setCredentials(AuthScope.ANY,
|
||||
new UsernamePasswordCredentials(username, password));
|
||||
|
||||
SSLContext sslContext = new SSLContextBuilder().loadTrustMaterial(null, new TrustStrategy() {
|
||||
// 信任所有
|
||||
@Override
|
||||
public boolean isTrusted(X509Certificate[] chain, String authType) throws CertificateException {
|
||||
return true;
|
||||
}
|
||||
}).build();
|
||||
SSLIOSessionStrategy sessionStrategy = new SSLIOSessionStrategy(sslContext, NoopHostnameVerifier.INSTANCE);
|
||||
builder.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
|
||||
@Override
|
||||
public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
|
||||
httpClientBuilder.disableAuthCaching();
|
||||
//httpClientBuilder.setSSLStrategy(sessionStrategy);
|
||||
httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider);
|
||||
return httpClientBuilder;
|
||||
}
|
||||
});
|
||||
|
||||
}catch (Exception e){
|
||||
e.printStackTrace();
|
||||
}*/
|
||||
/* es https */
|
||||
return builder;
|
||||
}
|
||||
|
||||
public RestClient getRestClient() {
|
||||
if (this.ifConfigWasWrong()) {
|
||||
return null;
|
||||
} else {
|
||||
RestClientBuilder restClientBuilder = this.getRestClientBuilder();
|
||||
RestClient restClient = restClientBuilder.build();
|
||||
this.setNodes(restClient);
|
||||
LOG.info("The Low Level Rest Client has been created.");
|
||||
return restClient;
|
||||
}
|
||||
}
|
||||
|
||||
private boolean ifConfigWasWrong() {
|
||||
if (this.configPath != null && this.configPath.length() != 0) {
|
||||
return false;
|
||||
} else {
|
||||
LOG.info("Config path is not allowed to be empty.");
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
private void setNodes(RestClient restClient) {
|
||||
List<Node> nodes = new ArrayList();
|
||||
HttpHost[] var3 = hostArray;
|
||||
int var4 = var3.length;
|
||||
|
||||
for (int var5 = 0; var5 < var4; ++var5) {
|
||||
HttpHost httpHost = var3[var5];
|
||||
nodes.add(new Node(httpHost));
|
||||
}
|
||||
|
||||
restClient.setNodes(nodes);
|
||||
}
|
||||
}
|
||||
|
|
@ -1,148 +0,0 @@
|
|||
package org.dromara.data2es.config;
|
||||
|
||||
import org.apache.kafka.clients.admin.NewTopic;
|
||||
import org.apache.kafka.clients.producer.KafkaProducer;
|
||||
import org.dromara.data2es.producer.NewProducer;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.kafka.core.KafkaAdmin;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Properties;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2021-11-03 14:15
|
||||
*/
|
||||
//@Component
|
||||
public class KafkaConfig {
|
||||
|
||||
private Logger logger = LoggerFactory.getLogger(KafkaConfig.class);
|
||||
|
||||
// private String kafkaServers = "140.168.2.31:21007,140.168.2.32:21007,140.168.2.33:21007";
|
||||
// private String kafkaServers = "53.208.61.105:6667,53.208.61.106:6667,53.208.61.107:6667";//六安GA网
|
||||
// private String kafkaServers = "34.72.62.93:9092";//六安视频网
|
||||
// private String kafkaServers = "127.0.0.1:9092";//本地
|
||||
private String kafkaServers = "53.207.8.71:9092,53.193.3.15:9092,53.160.0.237:9092,53.104.56.58:9092,53.128.22.61:9092";//省厅 马伟提供
|
||||
|
||||
private String groupId = "ruansiProducer";
|
||||
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(NewProducer.class);
|
||||
|
||||
|
||||
// Broker地址列表
|
||||
private final String bootstrapServers = "bootstrap.servers";
|
||||
|
||||
// 客户端ID
|
||||
private final String clientId = "client.id";
|
||||
|
||||
// Key序列化类
|
||||
private final String keySerializer = "key.serializer";
|
||||
|
||||
// Value序列化类
|
||||
private final String valueSerializer = "value.serializer";
|
||||
|
||||
// 协议类型:当前支持配置为SASL_PLAINTEXT或者PLAINTEXT
|
||||
private final String securityProtocol = "security.protocol";
|
||||
|
||||
// 服务名
|
||||
private final String saslKerberosServiceName = "sasl.kerberos.service.name";
|
||||
|
||||
// 域名
|
||||
private final String kerberosDomainName = "kerberos.domain.name";
|
||||
|
||||
//默认发送20条消息
|
||||
private final int messageNumToSend = 100;
|
||||
|
||||
/**
|
||||
* 用户自己申请的机机账号keytab文件名称
|
||||
*/
|
||||
private static final String USER_KEYTAB_FILE = "请修改为真实keytab文件名";
|
||||
|
||||
/**
|
||||
* 用户自己申请的机机账号名称
|
||||
*/
|
||||
private static final String USER_PRINCIPAL = "请修改为真实用户名称";
|
||||
|
||||
/**
|
||||
* 新Producer 构造函数
|
||||
* @param
|
||||
* @param
|
||||
*/
|
||||
|
||||
@Bean(name = "myKafkaProducer")
|
||||
public KafkaProducer newProducer() {
|
||||
Properties props = new Properties();
|
||||
|
||||
if (KafkaSecurityUtil.isSecurityModel())
|
||||
{
|
||||
try
|
||||
{
|
||||
logger.info("Securitymode start.");
|
||||
//!!注意,安全认证时,需要用户手动修改为自己申请的机机账号
|
||||
KafkaSecurityUtil.securityPrepare();
|
||||
props.put(securityProtocol, "SASL_PLAINTEXT");
|
||||
// 服务名
|
||||
props.put(saslKerberosServiceName, "kafka");
|
||||
// 域名
|
||||
props.put(kerberosDomainName, "hadoop.hadoop.com");
|
||||
}
|
||||
catch (IOException e)
|
||||
{
|
||||
logger.error("Security prepare failure.");
|
||||
logger.error("The IOException occured.", e);
|
||||
return null;
|
||||
}
|
||||
logger.info("Security prepare success.");
|
||||
}else{
|
||||
props.put(securityProtocol, "PLAINTEXT");
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Broker地址列表
|
||||
props.put(bootstrapServers,kafkaServers);
|
||||
// 客户端ID
|
||||
// props.put(clientId, "ruansiProducer");
|
||||
// Key序列化类
|
||||
props.put(keySerializer,
|
||||
"org.apache.kafka.common.serialization.IntegerSerializer");
|
||||
// Value序列化类
|
||||
props.put(valueSerializer,
|
||||
"org.apache.kafka.common.serialization.StringSerializer");
|
||||
//批量发送信息配置
|
||||
props.put("batch.size", 16384);
|
||||
props.put("linger.ms", 1);
|
||||
props.put("buffer.memory", 33554432);
|
||||
// 协议类型:当前支持配置为SASL_PLAINTEXT或者PLAINTEXT
|
||||
//props.put(securityProtocol, "SASL_PLAINTEXT");
|
||||
// // 服务名
|
||||
// props.put(saslKerberosServiceName, "kafka");
|
||||
// // 域名
|
||||
// props.put(kerberosDomainName, "hadoop.hadoop.com");
|
||||
//设置自定义的分区策略类,默认不传key,是粘性分区,尽量往一个分区中发消息。如果key不为null,则默认是按照key的hashcode与 partition的取余来决定哪个partition
|
||||
//props.put("partitioner.class","com.kafka.myparitioner.CidPartitioner");
|
||||
props.put(securityProtocol, "SASL_PLAINTEXT");
|
||||
props.put("sasl.jaas.config", "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"zkxc\" password=\"zkxcKafka07252023\";");
|
||||
props.put("sasl.mechanism", "SCRAM-SHA-256");
|
||||
KafkaProducer<String, String> producer = new KafkaProducer<>(props);
|
||||
// KafkaProducer producer = new KafkaProducer<>(props);
|
||||
|
||||
return producer;
|
||||
}
|
||||
|
||||
@Bean
|
||||
public KafkaAdmin admin(KafkaProperties properties){
|
||||
KafkaAdmin admin = new KafkaAdmin(properties.buildAdminProperties());
|
||||
admin.setFatalIfBrokerNotAvailable(true);
|
||||
return admin;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,138 @@
|
|||
package org.dromara.data2es.config;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.IOException;
|
||||
import java.util.Properties;
|
||||
|
||||
public final class KafkaProperties
|
||||
{
|
||||
private static final Logger LOG = LoggerFactory.getLogger(KafkaProperties.class);
|
||||
|
||||
// Topic名称,安全模式下,需要以管理员用户添加当前用户的访问权限
|
||||
public final static String TOPIC = "jysb_dwxx";
|
||||
|
||||
private static Properties serverProps = new Properties();
|
||||
|
||||
private static Properties producerProps = new Properties();
|
||||
|
||||
private static Properties consumerProps = new Properties();
|
||||
|
||||
private static Properties clientProps = new Properties();
|
||||
|
||||
private static KafkaProperties instance = null;
|
||||
|
||||
private KafkaProperties()
|
||||
{
|
||||
// String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
|
||||
String filePath = "/home/rsoft/config/";
|
||||
LOG.info("路径=={}",filePath);
|
||||
try
|
||||
{
|
||||
File proFile = new File(filePath + "producer.properties");
|
||||
|
||||
if (proFile.exists())
|
||||
{
|
||||
producerProps.load(new FileInputStream(filePath + "producer.properties"));
|
||||
}
|
||||
|
||||
File conFile = new File(filePath + "producer.properties");
|
||||
|
||||
if (conFile.exists())
|
||||
{
|
||||
consumerProps.load(new FileInputStream(filePath + "consumer.properties"));
|
||||
}
|
||||
|
||||
File serFile = new File(filePath + "server.properties");
|
||||
|
||||
if (serFile.exists())
|
||||
{
|
||||
serverProps.load(new FileInputStream(filePath + "server.properties"));
|
||||
}
|
||||
|
||||
File cliFile = new File(filePath + "client.properties");
|
||||
|
||||
if (cliFile.exists())
|
||||
{
|
||||
clientProps.load(new FileInputStream(filePath + "client.properties"));
|
||||
}
|
||||
}
|
||||
catch (IOException e)
|
||||
{
|
||||
LOG.info("The Exception occured.", e);
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized static KafkaProperties getInstance()
|
||||
{
|
||||
if (null == instance)
|
||||
{
|
||||
instance = new KafkaProperties();
|
||||
}
|
||||
|
||||
return instance;
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取参数值
|
||||
* @param key properites的key值
|
||||
* @param defValue 默认值
|
||||
* @return
|
||||
*/
|
||||
public String getValues(String key, String defValue)
|
||||
{
|
||||
String rtValue = null;
|
||||
|
||||
if (null == key)
|
||||
{
|
||||
LOG.error("key is null");
|
||||
}
|
||||
else
|
||||
{
|
||||
rtValue = getPropertiesValue(key);
|
||||
}
|
||||
|
||||
if (null == rtValue)
|
||||
{
|
||||
LOG.warn("KafkaProperties.getValues return null, key is " + key);
|
||||
rtValue = defValue;
|
||||
}
|
||||
|
||||
LOG.info("KafkaProperties.getValues: key is " + key + "; Value is " + rtValue);
|
||||
|
||||
return rtValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* 根据key值获取server.properties的值
|
||||
* @param key
|
||||
* @return
|
||||
*/
|
||||
private String getPropertiesValue(String key)
|
||||
{
|
||||
String rtValue = serverProps.getProperty(key);
|
||||
|
||||
// server.properties中没有,则再向producer.properties中获取
|
||||
if (null == rtValue)
|
||||
{
|
||||
rtValue = producerProps.getProperty(key);
|
||||
}
|
||||
|
||||
// producer中没有,则再向consumer.properties中获取
|
||||
if (null == rtValue)
|
||||
{
|
||||
rtValue = consumerProps.getProperty(key);
|
||||
}
|
||||
|
||||
// consumer没有,则再向client.properties中获取
|
||||
if (null == rtValue)
|
||||
{
|
||||
rtValue = clientProps.getProperty(key);
|
||||
}
|
||||
|
||||
return rtValue;
|
||||
}
|
||||
}
|
||||
|
|
@ -1,93 +0,0 @@
|
|||
package org.dromara.data2es.config;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.Properties;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2021-10-28 14:48
|
||||
*/
|
||||
public class KafkaSecurityUtil {
|
||||
|
||||
static Logger logger = LoggerFactory.getLogger(KafkaSecurityUtil.class);
|
||||
/**
|
||||
* 用户自己申请的机机账号keytab文件名称
|
||||
*/
|
||||
private static final String USER_KEYTAB_FILE = "user.keytab";
|
||||
|
||||
/**
|
||||
* 用户自己申请的机机账号名称
|
||||
*/
|
||||
private static final String USER_PRINCIPAL = "aqdsj_ruansi@HADOOP.COM";
|
||||
|
||||
public static void securityPrepare() throws IOException
|
||||
{
|
||||
//logger.error("进入了---securityPrepare");
|
||||
//String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
|
||||
//String krbFile = filePath + "krb5.conf";
|
||||
//ClassPathResource classPathResource = new ClassPathResource("krb5.conf");
|
||||
//String krbFile = classPathResource.getAbsolutePath();
|
||||
String krbFile = "/gpsstore/krb5.conf";
|
||||
// String userKeyTableFile = filePath + USER_KEYTAB_FILE;
|
||||
//ClassPathResource classPathResource1 = new ClassPathResource(USER_KEYTAB_FILE);
|
||||
String userKeyTableFile = "/gpsstore/user.keytab";
|
||||
|
||||
//windows路径下分隔符替换
|
||||
userKeyTableFile = userKeyTableFile.replace("\\", "\\\\");
|
||||
krbFile = krbFile.replace("\\", "\\\\");
|
||||
|
||||
LoginUtil.setKrb5Config(krbFile);
|
||||
LoginUtil.setZookeeperServerPrincipal("zookeeper/hadoop.hadoop.com");
|
||||
//logger.error("userKeyTableFile路径---{}",userKeyTableFile);
|
||||
LoginUtil.setJaasFile(USER_PRINCIPAL, userKeyTableFile);
|
||||
}
|
||||
|
||||
public static Boolean isSecurityModel()
|
||||
{
|
||||
Boolean isSecurity = false;
|
||||
//String krbFilePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator + "kafkaSecurityMode";
|
||||
//ClassPathResource classPathResource = new ClassPathResource("kafkaSecurityMode");
|
||||
InputStream inputStream = Thread.currentThread().getContextClassLoader().getResourceAsStream("kafkaSecurityMode");
|
||||
|
||||
/*File file = classPathResource.getFile();
|
||||
|
||||
if(!file.exists()){
|
||||
return isSecurity;
|
||||
}*/
|
||||
|
||||
Properties securityProps = new Properties();
|
||||
|
||||
|
||||
try
|
||||
{
|
||||
securityProps.load(inputStream);
|
||||
if ("yes".equalsIgnoreCase(securityProps.getProperty("kafka.client.security.mode")))
|
||||
{
|
||||
isSecurity = true;
|
||||
}
|
||||
}
|
||||
catch (Exception e)
|
||||
{
|
||||
logger.info("The Exception occured : {}.", e);
|
||||
}
|
||||
|
||||
return isSecurity;
|
||||
}
|
||||
|
||||
/*
|
||||
* 判断文件是否存在
|
||||
*/
|
||||
private static boolean isFileExists(String fileName)
|
||||
{
|
||||
File file = new File(fileName);
|
||||
|
||||
return file.exists();
|
||||
}
|
||||
}
|
||||
|
|
@ -1,144 +1,96 @@
|
|||
package org.dromara.data2es.config;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileWriter;
|
||||
import java.io.IOException;
|
||||
import java.util.Properties;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2021-11-01 14:30
|
||||
*/
|
||||
public class LoginUtil {
|
||||
private static final Log LOG = LogFactory.getLog(LoginUtil.class);
|
||||
private static final Logger LOG = LoggerFactory.getLogger(LoginUtil.class);
|
||||
|
||||
/**
|
||||
* no JavaDoc
|
||||
*/
|
||||
public enum Module {
|
||||
KAFKA("KafkaClient"), ZOOKEEPER("Client");
|
||||
|
||||
private String name;
|
||||
|
||||
private Module(String name)
|
||||
{
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public String getName()
|
||||
{
|
||||
return name;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* line operator string
|
||||
*/
|
||||
private static final String LINE_SEPARATOR = System.getProperty("line.separator");
|
||||
private static final String ES = "es.";
|
||||
|
||||
/**
|
||||
* jaas file postfix
|
||||
*/
|
||||
private static final String JAAS_POSTFIX = ".jaas.conf";
|
||||
private static final String IBM_LOGIN_MODULE = "com.ibm.security.auth.module.Krb5LoginModule required";
|
||||
private static final String SUN_LOGIN_MODULE = "com.sun.security.auth.module.Krb5LoginModule required";
|
||||
private static final String JAVA_SECURITY_LOGIN_CONF_KEY = "java.security.auth.login.config";
|
||||
private static final String JAVA_SECURITY_KRB5_CONF_KEY = "java.security.krb5.conf";
|
||||
|
||||
/**
|
||||
* is IBM jdk or not
|
||||
*/
|
||||
private static final boolean IS_IBM_JDK = System.getProperty("java.vendor").contains("IBM");
|
||||
private static final String ZOOKEEPER_AUTH_PRINCIPAL = "zookeeper.server.principal";
|
||||
private static boolean WriteFlag = false;
|
||||
|
||||
public LoginUtil() {
|
||||
}
|
||||
/**
|
||||
* IBM jdk login module
|
||||
*/
|
||||
private static final String IBM_LOGIN_MODULE = "com.ibm.security.auth.module.Krb5LoginModule required";
|
||||
|
||||
static void setKrb5Config(String krb5ConfFile) throws IOException {
|
||||
System.setProperty("java.security.krb5.conf", krb5ConfFile);
|
||||
String ret = System.getProperty("java.security.krb5.conf");
|
||||
if (ret == null) {
|
||||
LOG.error("java.security.krb5.conf is null.");
|
||||
throw new IOException("java.security.krb5.conf is null.");
|
||||
} else if (!ret.equals(krb5ConfFile)) {
|
||||
LOG.error("java.security.krb5.conf is " + ret + " is not " + krb5ConfFile + ".");
|
||||
throw new IOException("java.security.krb5.conf is " + ret + " is not " + krb5ConfFile + ".");
|
||||
}
|
||||
}
|
||||
/**
|
||||
* oracle jdk login module
|
||||
*/
|
||||
private static final String SUN_LOGIN_MODULE = "com.sun.security.auth.module.Krb5LoginModule required";
|
||||
|
||||
/**
|
||||
* Zookeeper quorum principal.
|
||||
*/
|
||||
public static final String ZOOKEEPER_AUTH_PRINCIPAL = "zookeeper.server.principal";
|
||||
|
||||
/**
|
||||
* java security krb5 file path
|
||||
*/
|
||||
public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf";
|
||||
|
||||
static synchronized void setJaasFile(String principal, String keytabPath) throws IOException {
|
||||
//String filePath = keytabPath.substring(0, keytabPath.lastIndexOf(File.separator));
|
||||
// String jaasPath = filePath + File.separator + "es." + System.getProperty("user.name") + ".jaas.conf";
|
||||
// jaasPath = jaasPath.replace("\\", "\\\\");
|
||||
/**
|
||||
* java security login file path
|
||||
*/
|
||||
public static final String JAVA_SECURITY_LOGIN_CONF = "java.security.auth.login.config";
|
||||
|
||||
/**
|
||||
* 设置jaas.conf文件
|
||||
*
|
||||
* @param principal
|
||||
* @param keytabPath
|
||||
* @throws IOException
|
||||
*/
|
||||
public static void setJaasFile(String principal, String keytabPath)
|
||||
throws IOException {
|
||||
String jaasPath =
|
||||
new File(System.getProperty("java.io.tmpdir")) + File.separator + System.getProperty("user.name")
|
||||
+ JAAS_POSTFIX;
|
||||
|
||||
keytabPath = keytabPath.replace("\\", "\\\\");
|
||||
LOG.info("jaasPath is {} " + jaasPath);
|
||||
LOG.info("keytabPath is " + keytabPath);
|
||||
if ((new File(jaasPath)).exists()) {
|
||||
if (!WriteFlag) {
|
||||
// windows路径下分隔符替换
|
||||
jaasPath = jaasPath.replace("\\", "\\\\");
|
||||
// 删除jaas文件
|
||||
deleteJaasFile(jaasPath);
|
||||
writeJaasFile(jaasPath, principal, keytabPath);
|
||||
System.setProperty("java.security.auth.login.config", jaasPath);
|
||||
WriteFlag = true;
|
||||
System.setProperty(JAVA_SECURITY_LOGIN_CONF, jaasPath);
|
||||
}
|
||||
} else {
|
||||
writeJaasFile(jaasPath, principal, keytabPath);
|
||||
System.setProperty("java.security.auth.login.config", jaasPath);
|
||||
WriteFlag = true;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private static void writeJaasFile(String jaasPath, String principal, String keytabPath) throws IOException {
|
||||
try {
|
||||
FileWriter writer = new FileWriter(new File(jaasPath));
|
||||
|
||||
try {
|
||||
writer.write(getJaasConfContext(principal, keytabPath));
|
||||
writer.flush();
|
||||
} catch (Throwable var7) {
|
||||
try {
|
||||
writer.close();
|
||||
} catch (Throwable var6) {
|
||||
var7.addSuppressed(var6);
|
||||
}
|
||||
|
||||
throw var7;
|
||||
}
|
||||
|
||||
writer.close();
|
||||
} catch (IOException var8) {
|
||||
throw new IOException("Failed to create jaas.conf File");
|
||||
}
|
||||
}
|
||||
|
||||
private static void deleteJaasFile(String jaasPath) throws IOException {
|
||||
File jaasFile = new File(jaasPath);
|
||||
if (jaasFile.exists() && !jaasFile.delete()) {
|
||||
throw new IOException("Failed to delete exists jaas file.");
|
||||
}
|
||||
}
|
||||
|
||||
private static String getJaasConfContext(String principal, String keytabPath) {
|
||||
Module[] allModule = Module.values();
|
||||
StringBuilder builder = new StringBuilder();
|
||||
Module[] var4 = allModule;
|
||||
int var5 = allModule.length;
|
||||
|
||||
for(int var6 = 0; var6 < var5; ++var6) {
|
||||
Module modlue = var4[var6];
|
||||
builder.append(getModuleContext(principal, keytabPath, modlue));
|
||||
}
|
||||
|
||||
return builder.toString();
|
||||
}
|
||||
|
||||
private static String getModuleContext(String userPrincipal, String keyTabPath, Module module) {
|
||||
StringBuilder builder = new StringBuilder();
|
||||
if (IS_IBM_JDK) {
|
||||
builder.append(module.getName()).append(" {").append(LINE_SEPARATOR);
|
||||
builder.append("com.ibm.security.auth.module.Krb5LoginModule required").append(LINE_SEPARATOR);
|
||||
builder.append("credsType=both").append(LINE_SEPARATOR);
|
||||
builder.append("principal=\"").append(userPrincipal).append("\"").append(LINE_SEPARATOR);
|
||||
builder.append("useKeytab=\"").append(keyTabPath).append("\"").append(LINE_SEPARATOR);
|
||||
builder.append("debug=true;").append(LINE_SEPARATOR);
|
||||
builder.append("};").append(LINE_SEPARATOR);
|
||||
} else {
|
||||
builder.append(module.getName()).append(" {").append(LINE_SEPARATOR);
|
||||
builder.append("com.sun.security.auth.module.Krb5LoginModule required").append(LINE_SEPARATOR);
|
||||
builder.append("useKeyTab=true").append(LINE_SEPARATOR);
|
||||
builder.append("keyTab=\"").append(keyTabPath).append("\"").append(LINE_SEPARATOR);
|
||||
builder.append("principal=\"").append(userPrincipal).append("\"").append(LINE_SEPARATOR);
|
||||
builder.append("useTicketCache=false").append(LINE_SEPARATOR);
|
||||
builder.append("storeKey=true").append(LINE_SEPARATOR);
|
||||
builder.append("debug=true;").append(LINE_SEPARATOR);
|
||||
builder.append("};").append(LINE_SEPARATOR);
|
||||
}
|
||||
|
||||
return builder.toString();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* 设置zookeeper服务端principal
|
||||
|
|
@ -147,8 +99,7 @@ public class LoginUtil {
|
|||
* @throws IOException
|
||||
*/
|
||||
public static void setZookeeperServerPrincipal(String zkServerPrincipal)
|
||||
throws IOException
|
||||
{
|
||||
throws IOException {
|
||||
System.setProperty(ZOOKEEPER_AUTH_PRINCIPAL, zkServerPrincipal);
|
||||
String ret = System.getProperty(ZOOKEEPER_AUTH_PRINCIPAL);
|
||||
if (ret == null)
|
||||
|
|
@ -161,17 +112,148 @@ public class LoginUtil {
|
|||
}
|
||||
}
|
||||
|
||||
public static enum Module {
|
||||
Elasticsearch("EsClient");
|
||||
|
||||
private String name;
|
||||
|
||||
private Module(String name) {
|
||||
this.name = name;
|
||||
/**
|
||||
* 设置krb5文件
|
||||
*
|
||||
* @param krb5ConfFile
|
||||
* @throws IOException
|
||||
*/
|
||||
public static void setKrb5Config(String krb5ConfFile)
|
||||
throws IOException {
|
||||
System.setProperty(JAVA_SECURITY_KRB5_CONF, krb5ConfFile);
|
||||
String ret = System.getProperty(JAVA_SECURITY_KRB5_CONF);
|
||||
if (ret == null)
|
||||
{
|
||||
throw new IOException(JAVA_SECURITY_KRB5_CONF + " is null.");
|
||||
}
|
||||
if (!ret.equals(krb5ConfFile))
|
||||
{
|
||||
throw new IOException(JAVA_SECURITY_KRB5_CONF + " is " + ret + " is not " + krb5ConfFile + ".");
|
||||
}
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return this.name;
|
||||
/**
|
||||
* 写入jaas文件
|
||||
*
|
||||
* @throws IOException
|
||||
* 写文件异常
|
||||
*/
|
||||
private static void writeJaasFile(String jaasPath, String principal, String keytabPath)
|
||||
throws IOException {
|
||||
FileWriter writer = new FileWriter(new File(jaasPath));
|
||||
try
|
||||
{
|
||||
writer.write(getJaasConfContext(principal, keytabPath));
|
||||
writer.flush();
|
||||
}
|
||||
catch (IOException e)
|
||||
{
|
||||
throw new IOException("Failed to create jaas.conf File");
|
||||
}
|
||||
finally
|
||||
{
|
||||
writer.close();
|
||||
}
|
||||
}
|
||||
|
||||
private static void deleteJaasFile(String jaasPath)
|
||||
throws IOException {
|
||||
File jaasFile = new File(jaasPath);
|
||||
if (jaasFile.exists())
|
||||
{
|
||||
if (!jaasFile.delete())
|
||||
{
|
||||
throw new IOException("Failed to delete exists jaas file.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static String getJaasConfContext(String principal, String keytabPath) {
|
||||
Module[] allModule = Module.values();
|
||||
StringBuilder builder = new StringBuilder();
|
||||
for (Module modlue : allModule)
|
||||
{
|
||||
builder.append(getModuleContext(principal, keytabPath, modlue));
|
||||
}
|
||||
return builder.toString();
|
||||
}
|
||||
|
||||
private static String getModuleContext(String userPrincipal, String keyTabPath, Module module) {
|
||||
StringBuilder builder = new StringBuilder();
|
||||
if (IS_IBM_JDK) {
|
||||
builder.append(module.getName()).append(" {").append(LINE_SEPARATOR);
|
||||
builder.append(IBM_LOGIN_MODULE).append(LINE_SEPARATOR);
|
||||
builder.append("credsType=both").append(LINE_SEPARATOR);
|
||||
builder.append("principal=\"" + userPrincipal + "\"").append(LINE_SEPARATOR);
|
||||
builder.append("useKeytab=\"" + keyTabPath + "\"").append(LINE_SEPARATOR);
|
||||
builder.append("debug=true;").append(LINE_SEPARATOR);
|
||||
builder.append("};").append(LINE_SEPARATOR);
|
||||
} else {
|
||||
builder.append(module.getName()).append(" {").append(LINE_SEPARATOR);
|
||||
builder.append(SUN_LOGIN_MODULE).append(LINE_SEPARATOR);
|
||||
builder.append("useKeyTab=true").append(LINE_SEPARATOR);
|
||||
builder.append("keyTab=\"" + keyTabPath + "\"").append(LINE_SEPARATOR);
|
||||
builder.append("principal=\"" + userPrincipal + "\"").append(LINE_SEPARATOR);
|
||||
builder.append("useTicketCache=false").append(LINE_SEPARATOR);
|
||||
builder.append("storeKey=true").append(LINE_SEPARATOR);
|
||||
builder.append("debug=true;").append(LINE_SEPARATOR);
|
||||
builder.append("};").append(LINE_SEPARATOR);
|
||||
}
|
||||
|
||||
return builder.toString();
|
||||
}
|
||||
|
||||
public static void securityPrepare(String principal, String keyTabFile) throws IOException {
|
||||
// String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
|
||||
String filePath = "/home/rsoft/config/";
|
||||
String krbFile = filePath + "krb5.conf";
|
||||
String userKeyTableFile = filePath + keyTabFile;
|
||||
|
||||
// windows路径下分隔符替换
|
||||
userKeyTableFile = userKeyTableFile.replace("\\", "\\\\");
|
||||
krbFile = krbFile.replace("\\", "\\\\");
|
||||
|
||||
LoginUtil.setKrb5Config(krbFile);
|
||||
LoginUtil.setZookeeperServerPrincipal("zookeeper/hadoop.hadoop.com");
|
||||
LoginUtil.setJaasFile(principal, userKeyTableFile);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check security mode
|
||||
*
|
||||
* @return boolean
|
||||
*/
|
||||
public static Boolean isSecurityModel() {
|
||||
Boolean isSecurity = false;
|
||||
// String krbFilePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator + "kafkaSecurityMode";
|
||||
String krbFilePath = "/home/rsoft/config/kafkaSecurityMode";
|
||||
Properties securityProps = new Properties();
|
||||
|
||||
// file does not exist.
|
||||
if (!isFileExists(krbFilePath)) {
|
||||
return isSecurity;
|
||||
}
|
||||
|
||||
try {
|
||||
securityProps.load(new FileInputStream(krbFilePath));
|
||||
|
||||
if ("yes".equalsIgnoreCase(securityProps.getProperty("kafka.client.security.mode")))
|
||||
{
|
||||
isSecurity = true;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOG.info("The Exception occured : {}.", e);
|
||||
}
|
||||
|
||||
return isSecurity;
|
||||
}
|
||||
|
||||
/*
|
||||
* 判断文件是否存在
|
||||
*/
|
||||
private static boolean isFileExists(String fileName) {
|
||||
File file = new File(fileName);
|
||||
|
||||
return file.exists();
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,24 +1,40 @@
|
|||
package org.dromara.data2es.config;
|
||||
|
||||
import org.dromara.data2es.handler.RedisExpireListener;
|
||||
import org.dromara.data2es.handler.RedisExpireRecoveryHandler;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.data.redis.connection.RedisConnectionFactory;
|
||||
import org.springframework.data.redis.listener.KeyExpirationEventMessageListener;
|
||||
import org.springframework.data.redis.listener.PatternTopic;
|
||||
import org.springframework.data.redis.listener.RedisMessageListenerContainer;
|
||||
|
||||
@Configuration
|
||||
public class RedisListenerConfig {
|
||||
|
||||
@Bean
|
||||
RedisMessageListenerContainer listenerContainer(RedisConnectionFactory connectionFactory) {
|
||||
RedisMessageListenerContainer listenerContainer = new RedisMessageListenerContainer();
|
||||
listenerContainer.setConnectionFactory(connectionFactory);
|
||||
return listenerContainer;
|
||||
RedisMessageListenerContainer listenerContainer(
|
||||
RedisConnectionFactory connectionFactory,
|
||||
RedisExpireRecoveryHandler recoveryHandler) {
|
||||
|
||||
RedisMessageListenerContainer container = new RedisMessageListenerContainer();
|
||||
container.setConnectionFactory(connectionFactory);
|
||||
|
||||
// 添加连接监听器用于故障转移恢复
|
||||
container.addMessageListener(recoveryHandler, new PatternTopic("__keyspace@*__:expired"));
|
||||
return container;
|
||||
}
|
||||
|
||||
@Bean
|
||||
KeyExpirationEventMessageListener redisKeyExpirationListener(RedisMessageListenerContainer listenerContainer) {
|
||||
return new RedisExpireListener(listenerContainer);
|
||||
KeyExpirationEventMessageListener redisKeyExpirationListener(
|
||||
RedisMessageListenerContainer listenerContainer,
|
||||
RedisExpireRecoveryHandler recoveryHandler) {
|
||||
|
||||
return new RedisExpireListener(listenerContainer, recoveryHandler);
|
||||
}
|
||||
|
||||
@Bean
|
||||
RedisExpireRecoveryHandler redisExpireRecoveryHandler() {
|
||||
return new RedisExpireRecoveryHandler();
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
package org.dromara.data2es.controller;
|
||||
|
||||
import cn.hutool.core.bean.BeanUtil;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.dubbo.config.annotation.DubboReference;
|
||||
import org.dromara.common.core.domain.R;
|
||||
import org.dromara.common.web.core.BaseController;
|
||||
|
|
@ -21,6 +22,7 @@ import java.util.Objects;
|
|||
|
||||
@RequestMapping("device")
|
||||
@RestController
|
||||
@Slf4j
|
||||
public class DeviceInfoController extends BaseController {
|
||||
|
||||
@DubboReference
|
||||
|
|
@ -34,6 +36,10 @@ public class DeviceInfoController extends BaseController {
|
|||
return R.fail("参数为空");
|
||||
}
|
||||
Object dataList = params.get("dataList");
|
||||
Object infoSource = params.get("infoSource");
|
||||
if(Objects.isNull(infoSource)){
|
||||
return R.fail("参数 [infoSource] 为空");
|
||||
}
|
||||
if(Objects.isNull(dataList)){
|
||||
return R.fail("参数 [dataList] 为空");
|
||||
}
|
||||
|
|
@ -42,7 +48,10 @@ public class DeviceInfoController extends BaseController {
|
|||
return R.fail("单次数据超过了100条");
|
||||
}
|
||||
List<RemoteDeviceBo> list = BeanUtil.copyToList(dataList1, RemoteDeviceBo.class);
|
||||
|
||||
for (RemoteDeviceBo deviceBo : list) {
|
||||
deviceBo.setInfoSource(params.get("infoSource").toString());
|
||||
}
|
||||
log.error("插入设备记录={}",list.toString());
|
||||
boolean inserted = deviceService.batchSaveDevice(list);
|
||||
|
||||
if(inserted) {
|
||||
|
|
|
|||
|
|
@ -27,7 +27,17 @@ public class GpsInfoEntity implements Serializable {
|
|||
*/
|
||||
private String deviceType;
|
||||
|
||||
private String zzjgdm;
|
||||
|
||||
private String zzjgmc;
|
||||
|
||||
private String policeNo;
|
||||
|
||||
private String policeName;
|
||||
|
||||
private String phoneNum;
|
||||
|
||||
private String carNum;
|
||||
|
||||
private Double[] location;
|
||||
|
||||
|
|
@ -45,5 +55,7 @@ public class GpsInfoEntity implements Serializable {
|
|||
//地市代码 3401,3402
|
||||
private String infoSource;
|
||||
|
||||
private Integer online;
|
||||
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
package org.dromara.data2es.dubbo;
|
||||
|
||||
import cn.hutool.core.bean.BeanUtil;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import org.apache.dubbo.config.annotation.DubboService;
|
||||
import org.dromara.common.core.domain.R;
|
||||
|
|
@ -21,6 +22,6 @@ public class RemoteDataToEsServiceImpl implements RemoteDataToEsService {
|
|||
|
||||
@Override
|
||||
public R saveDataBatch(List<RemoteGpsInfo> gpsInfoList) {
|
||||
return gpsService.saveDataBatch(MapstructUtils.convert(gpsInfoList, EsGpsInfoVO2.class));
|
||||
return gpsService.saveDataBatch(BeanUtil.copyToList(gpsInfoList, EsGpsInfoVO2.class));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,69 +2,162 @@ package org.dromara.data2es.handler;
|
|||
|
||||
import cn.hutool.core.bean.BeanUtil;
|
||||
import cn.hutool.json.JSONObject;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.dromara.common.core.utils.RedisConstants;
|
||||
import org.dromara.common.redis.utils.RedisUtils;
|
||||
import org.dromara.data2es.controller.DataToEsController;
|
||||
import org.dromara.data2es.domain.EsGpsInfoVO2;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.redisson.Redisson;
|
||||
import org.redisson.api.RLock;
|
||||
import org.redisson.api.RTopic;
|
||||
import org.redisson.api.RedissonClient;
|
||||
import org.redisson.connection.ConnectionListener;
|
||||
import org.redisson.connection.ConnectionManager;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.data.redis.connection.Message;
|
||||
import org.springframework.data.redis.connection.MessageListener;
|
||||
import org.springframework.data.redis.listener.KeyExpirationEventMessageListener;
|
||||
import org.springframework.data.redis.listener.RedisMessageListenerContainer;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import java.util.Date;
|
||||
import javax.annotation.PostConstruct;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.Objects;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* <p>description: </p>
|
||||
*
|
||||
* @author chenle
|
||||
* @date 2021-11-08 16:40
|
||||
*/
|
||||
@Component
|
||||
@Slf4j
|
||||
public class RedisExpireListener extends KeyExpirationEventMessageListener {
|
||||
|
||||
private final RedisExpireRecoveryHandler recoveryHandler;
|
||||
|
||||
@Autowired
|
||||
DataToEsController dataToEsController;
|
||||
private volatile boolean active = true;
|
||||
|
||||
Logger logger = LoggerFactory.getLogger(RedisExpireListener.class);
|
||||
public RedisExpireListener(
|
||||
RedisMessageListenerContainer listenerContainer,
|
||||
RedisExpireRecoveryHandler recoveryHandler) {
|
||||
|
||||
|
||||
/**
|
||||
* Creates new {@link MessageListener} for {@code __keyevent@*__:expired} messages.
|
||||
*
|
||||
* @param listenerContainer must not be {@literal null}.
|
||||
*/
|
||||
public RedisExpireListener(RedisMessageListenerContainer listenerContainer) {
|
||||
super(listenerContainer);
|
||||
this.recoveryHandler = recoveryHandler;
|
||||
recoveryHandler.registerListener(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void init() {
|
||||
try {
|
||||
super.init();
|
||||
log.info("Redis过期监听器初始化成功");
|
||||
} catch (Exception e) {
|
||||
log.error("监听器初始化失败", e);
|
||||
}
|
||||
}
|
||||
|
||||
public void reconnect() {
|
||||
if (!active) return;
|
||||
|
||||
try {
|
||||
log.info("尝试重新注册过期事件监听器...");
|
||||
// 停止当前监听
|
||||
super.destroy();
|
||||
// 重新初始化
|
||||
super.init();
|
||||
log.info("过期事件监听器重新注册成功");
|
||||
} catch (Exception e) {
|
||||
log.error("重新注册监听器失败", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onMessage(Message message, byte[] pattern) {
|
||||
if (!active) return;
|
||||
|
||||
String expireKey = message.toString();
|
||||
if(StringUtils.isNotEmpty(expireKey) &&
|
||||
expireKey.startsWith(RedisConstants.ORG_CODE_PRE)){
|
||||
String[] split = expireKey.split(":");
|
||||
EsGpsInfoVO2 esGpsInfoVO2 = new EsGpsInfoVO2();
|
||||
esGpsInfoVO2.setDeviceType(split[2]);
|
||||
esGpsInfoVO2.setDeviceCode(split[3]);
|
||||
log.info("过期的Key={}", expireKey);
|
||||
|
||||
if (StringUtils.isNotEmpty(expireKey) &&
|
||||
expireKey.startsWith(RedisConstants.ORG_CODE_PRE)) {
|
||||
|
||||
log.info("在线定位过期的Key={}", expireKey);
|
||||
handleExpiredEvent(expireKey);
|
||||
}
|
||||
}
|
||||
|
||||
private void handleExpiredEvent(String expiredKey) {
|
||||
RedissonClient redisson = RedisUtils.getClient();
|
||||
RLock lock = redisson.getLock("LOCK:" + expiredKey);
|
||||
|
||||
try {
|
||||
if (lock.tryLock(0, 30, TimeUnit.SECONDS)) {
|
||||
// 实际业务逻辑
|
||||
String[] split = expiredKey.split(":");
|
||||
String zzjgdm = split[1];
|
||||
String deviceType = split[2];
|
||||
String deviceCode = split[3];
|
||||
if(StringUtils.isNotEmpty(zzjgdm)) {
|
||||
JSONObject object = RedisUtils.getBucket(RedisConstants.ONLINE_USERS + zzjgdm + ":"
|
||||
+ deviceType+":"+deviceCode);
|
||||
|
||||
if ("05".equals(deviceType) ) {
|
||||
return;
|
||||
}
|
||||
|
||||
log.info("处理过期Key: {}", expiredKey);
|
||||
JSONObject object = RedisUtils.getBucket(RedisConstants.ONLINE_USERS +zzjgdm +":"+ deviceType + ":" + deviceCode);
|
||||
|
||||
if (Objects.isNull(object)) {
|
||||
log.info("redis key={},Object=null,deviceType={},deviceCode={}",
|
||||
expiredKey, deviceType, deviceCode);
|
||||
return;
|
||||
}
|
||||
|
||||
EsGpsInfoVO2 gpsInfo = BeanUtil.toBean(object, EsGpsInfoVO2.class);
|
||||
gpsInfo.setGpsTime(new Date());
|
||||
gpsInfo.setOnline(0);
|
||||
dataToEsController.saveGpsInfo(gpsInfo);
|
||||
log.info("处理完成: key={}", expiredKey);
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
log.error("处理过期事件被中断", e);
|
||||
} catch (Exception e) {
|
||||
log.error("处理过期事件异常", e);
|
||||
} finally {
|
||||
if (lock.isHeldByCurrentThread()) {
|
||||
lock.unlock();
|
||||
}
|
||||
}
|
||||
logger.info("redis key expired:key={}", expireKey);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void destroy() {
|
||||
active = false;
|
||||
try {
|
||||
super.destroy();
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
log.info("Redis过期监听器已停止");
|
||||
}
|
||||
|
||||
// 添加连接状态监听(使用Redisson事件总线)
|
||||
@PostConstruct
|
||||
public void addSentinelConnectionListener() {
|
||||
try {
|
||||
RedissonClient redisson = RedisUtils.getClient();
|
||||
|
||||
// 订阅Redisson连接事件
|
||||
RTopic connectionEvents = redisson.getTopic("__redisson_connection_event");
|
||||
connectionEvents.addListener(String.class, (channel, msg) -> {
|
||||
if ("CONNECTED".equals(msg)) {
|
||||
log.info("Redis连接已建立: {}", msg);
|
||||
// 标记需要恢复监听
|
||||
recoveryHandler.markReconnected();
|
||||
} else if ("DISCONNECTED".equals(msg)) {
|
||||
log.warn("Redis连接断开: {}", msg);
|
||||
}
|
||||
});
|
||||
|
||||
log.info("已注册Redisson连接事件监听器");
|
||||
} catch (Exception e) {
|
||||
log.warn("无法添加Redisson连接事件监听器", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue