Compare commits

...

2 Commits

Author SHA1 Message Date
luyya 91a54e2d26 省厅版本位置汇聚对接华为认证的kafka ES 2025-07-08 14:50:53 +08:00
luyya 040885e507 省厅版本位置汇聚对接华为认证的kafka ES 2025-07-08 11:02:13 +08:00
17 changed files with 358 additions and 23 deletions

View File

@ -89,12 +89,12 @@
<id>prod</id>
<properties>
<profiles.active>prod</profiles.active>
<nacos.server>127.0.0.1:8848</nacos.server>
<nacos.server>53.16.17.16:8848</nacos.server>
<nacos.discovery.group>DEFAULT_GROUP</nacos.discovery.group>
<nacos.config.group>DEFAULT_GROUP</nacos.config.group>
<nacos.username>nacos</nacos.username>
<nacos.password>nacos</nacos.password>
<logstash.address>127.0.0.1:4560</logstash.address>
<nacos.password>Ycgis!2509</nacos.password>
<logstash.address>53.16.17.16:4560</logstash.address>
</properties>
</profile>
</profiles>

View File

@ -130,6 +130,8 @@ public class LoginUser implements Serializable {
*/
private String deviceType;
private String manageDeptId;
/**
* id
*/

View File

@ -49,10 +49,15 @@ import static org.apache.dubbo.metadata.report.support.Constants.DEFAULT_METADAT
public class RedisMetadataReport extends AbstractMetadataReport {
private static final String REDIS_DATABASE_KEY = "database";
private static final String SENTINEL_KEY = "sentinel";
private static final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(RedisMetadataReport.class);
// protected , for test
protected JedisPool pool;
protected JedisSentinelPool sentinelPool;
private Set<HostAndPort> jedisClusterNodes;
private int timeout;
private String password;
@ -75,6 +80,14 @@ public class RedisMetadataReport extends AbstractMetadataReport {
for (URL tmpUrl : urls) {
jedisClusterNodes.add(new HostAndPort(tmpUrl.getHost(), tmpUrl.getPort()));
}
} else if (url.getParameter(SENTINEL_KEY,false)) {
Set<String> sentinels = new HashSet<>();
List<URL> urls = url.getBackupUrls();
for (URL tmpUrl : urls) {
sentinels.add(tmpUrl.getHost()+":"+ tmpUrl.getPort());
}
int database = url.getParameter(REDIS_DATABASE_KEY, 0);
sentinelPool = new JedisSentinelPool("mymaster",sentinels ,new GenericObjectPoolConfig<>(), timeout, password, database);
} else {
int database = url.getParameter(REDIS_DATABASE_KEY, 0);
pool = new JedisPool(new JedisPoolConfig(), url.getHost(), url.getPort(), timeout, password, database);
@ -128,11 +141,25 @@ public class RedisMetadataReport extends AbstractMetadataReport {
/**
 * Routes a metadata write to the standalone, sentinel, or cluster backend,
 * depending on which connection pool was initialized from the registry URL.
 *
 * @param metadataIdentifier identifier whose unique key becomes the Redis key
 * @param v                  serialized metadata value to store
 */
private void storeMetadata(BaseMetadataIdentifier metadataIdentifier, String v) {
    if (pool != null) {
        // Standalone Redis instance.
        storeMetadataStandalone(metadataIdentifier, v);
        return;
    }
    if (sentinelPool != null) {
        // Sentinel-managed master/replica deployment.
        storeMetadataInSentinel(metadataIdentifier, v);
        return;
    }
    // Neither pool configured: fall back to Redis Cluster.
    storeMetadataInCluster(metadataIdentifier, v);
}
/**
 * Stores one metadata entry through the Redis Sentinel pool.
 *
 * @param metadataIdentifier identifier whose unique key becomes the Redis key
 * @param v                  serialized metadata value to store
 * @throws RpcException if the write fails for any reason
 */
private void storeMetadataInSentinel(BaseMetadataIdentifier metadataIdentifier, String v) {
    // try-with-resources returns the connection to the sentinel pool.
    try (Jedis jedisSentinel = sentinelPool.getResource()) {
        jedisSentinel.set(metadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY), v, jedisParams);
    } catch (Throwable e) {
        // Fixed: message previously said "redis cluster" although this is the sentinel path
        // (copy-paste from storeMetadataInCluster).
        String msg =
                "Failed to put " + metadataIdentifier + " to redis sentinel " + v + ", cause: " + e.getMessage();
        logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
        throw new RpcException(msg, e);
    }
}
private void storeMetadataInCluster(BaseMetadataIdentifier metadataIdentifier, String v) {
try (JedisCluster jedisCluster =
new JedisCluster(jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) {
@ -158,11 +185,24 @@ public class RedisMetadataReport extends AbstractMetadataReport {
/**
 * Routes a metadata delete to the standalone, sentinel, or cluster backend,
 * depending on which connection pool was initialized from the registry URL.
 *
 * @param metadataIdentifier identifier whose unique key is removed from Redis
 */
private void deleteMetadata(BaseMetadataIdentifier metadataIdentifier) {
    if (pool != null) {
        // Standalone Redis instance.
        deleteMetadataStandalone(metadataIdentifier);
        return;
    }
    if (sentinelPool != null) {
        // Sentinel-managed master/replica deployment.
        deleteMetadataSentinel(metadataIdentifier);
        return;
    }
    // Neither pool configured: fall back to Redis Cluster.
    deleteMetadataInCluster(metadataIdentifier);
}
/**
 * Deletes one metadata entry through the Redis Sentinel pool.
 *
 * @param metadataIdentifier identifier whose unique key is removed from Redis
 * @throws RpcException if the delete fails for any reason
 */
private void deleteMetadataSentinel(BaseMetadataIdentifier metadataIdentifier) {
    // try-with-resources returns the connection to the sentinel pool.
    try (Jedis connection = sentinelPool.getResource()) {
        connection.del(metadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY));
    } catch (Throwable e) {
        String msg = "Failed to delete " + metadataIdentifier + " from redis , cause: " + e.getMessage();
        logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
        throw new RpcException(msg, e);
    }
}
private void deleteMetadataInCluster(BaseMetadataIdentifier metadataIdentifier) {
try (JedisCluster jedisCluster =
new JedisCluster(jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) {
@ -187,11 +227,24 @@ public class RedisMetadataReport extends AbstractMetadataReport {
/**
 * Routes a metadata read to the standalone, sentinel, or cluster backend,
 * depending on which connection pool was initialized from the registry URL.
 *
 * @param metadataIdentifier identifier whose unique key is looked up in Redis
 * @return the stored metadata string, or null if absent
 */
private String getMetadata(BaseMetadataIdentifier metadataIdentifier) {
    if (pool != null) {
        // Standalone Redis instance.
        return getMetadataStandalone(metadataIdentifier);
    }
    if (sentinelPool != null) {
        // Sentinel-managed master/replica deployment.
        return getMetadataSentinel(metadataIdentifier);
    }
    // Neither pool configured: fall back to Redis Cluster.
    return getMetadataInCluster(metadataIdentifier);
}
/**
 * Reads one metadata entry through the Redis Sentinel pool.
 *
 * @param metadataIdentifier identifier whose unique key is looked up in Redis
 * @return the stored metadata string, or null if the key does not exist
 * @throws RpcException if the read fails for any reason
 */
private String getMetadataSentinel(BaseMetadataIdentifier metadataIdentifier) {
    // try-with-resources returns the connection to the sentinel pool.
    try (Jedis connection = sentinelPool.getResource()) {
        return connection.get(metadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY));
    } catch (Throwable e) {
        String msg = "Failed to get " + metadataIdentifier + " from redis , cause: " + e.getMessage();
        logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
        throw new RpcException(msg, e);
    }
}
private String getMetadataInCluster(BaseMetadataIdentifier metadataIdentifier) {
try (JedisCluster jedisCluster =
new JedisCluster(jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) {
@ -243,6 +296,8 @@ public class RedisMetadataReport extends AbstractMetadataReport {
private boolean storeMapping(String key, String field, String value, String ticket) {
if (pool != null) {
return storeMappingStandalone(key, field, value, ticket);
}else if(sentinelPool != null) {
return storeMappingSentinel(key, field, value, ticket);
} else {
return storeMappingInCluster(key, field, value, ticket);
}
@ -278,6 +333,33 @@ public class RedisMetadataReport extends AbstractMetadataReport {
return false;
}
/**
 * CAS-style write of one mapping hash field through the Sentinel pool,
 * implemented with Redis WATCH/MULTI/EXEC.
 *
 * <p>The transaction commits only when the field is currently absent, no
 * ticket was supplied, or the current value equals {@code ticket}. On a
 * successful commit the field name is published on the pub/sub key so
 * subscribers learn of the change. (The "slot distribution" wording from the
 * cluster variant does not apply here — sentinel serves a single master.)</p>
 *
 * @param key    Redis hash key
 * @param field  hash field to set
 * @param value  new value to store
 * @param ticket expected current value acting as the CAS guard; may be null
 * @return true if the transaction committed, false if skipped or aborted
 * @throws RpcException if the Redis operation fails
 */
private boolean storeMappingSentinel(String key, String field, String value, String ticket) {
try (Jedis jedisSentinel = sentinelPool.getResource()) {
// WATCH makes the following MULTI/EXEC abort if another client writes the key.
jedisSentinel.watch(key);
String oldValue = jedisSentinel.hget(key, field);
// Proceed only when unset, unguarded, or the guard matches the current value.
if (null == oldValue || null == ticket || oldValue.equals(ticket)) {
Transaction transaction = jedisSentinel.multi();
transaction.hset(key, field, value);
// exec() returns null when the watched key changed and the transaction aborted.
List<Object> result = transaction.exec();
if (null != result) {
// Notify subscribers of the updated field.
jedisSentinel.publish(buildPubSubKey(), field);
return true;
}
}
// CAS guard failed or transaction aborted: release the watch explicitly.
jedisSentinel.unwatch();
} catch (Throwable e) {
String msg = "Failed to put " + key + ":" + field + " to redis " + value + ", cause: " + e.getMessage();
logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
throw new RpcException(msg, e);
}
return false;
}
/**
* use 'watch' to implement cas.
* Find information about slot distribution by key.
@ -339,6 +421,8 @@ public class RedisMetadataReport extends AbstractMetadataReport {
private String getMappingData(String key, String field) {
if (pool != null) {
return getMappingDataStandalone(key, field);
}else if(sentinelPool != null) {
return getMappingDataSentinel(key, field);
} else {
return getMappingDataInCluster(key, field);
}
@ -355,6 +439,17 @@ public class RedisMetadataReport extends AbstractMetadataReport {
}
}
/**
 * Reads one mapping hash field through the Redis Sentinel pool.
 *
 * @param key   Redis hash key
 * @param field hash field to read
 * @return the stored value, or null if the field does not exist
 * @throws RpcException if the read fails for any reason
 */
private String getMappingDataSentinel(String key, String field) {
    // try-with-resources returns the connection to the sentinel pool.
    try (Jedis connection = sentinelPool.getResource()) {
        return connection.hget(key, field);
    } catch (Throwable e) {
        String msg = "Failed to get " + key + ":" + field + " from redis , cause: " + e.getMessage();
        logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
        throw new RpcException(msg, e);
    }
}
private String getMappingDataStandalone(String key, String field) {
try (Jedis jedis = pool.getResource()) {
return jedis.hget(key, field);
@ -502,6 +597,14 @@ public class RedisMetadataReport extends AbstractMetadataReport {
logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
throw new RpcException(msg, e);
}
} else if (sentinelPool != null) {
try (Jedis jedisSentinel = sentinelPool.getResource()) {
jedisSentinel.subscribe(notifySub, path);
} catch (Throwable e) {
String msg = "Failed to subscribe " + path + ", cause: " + e.getMessage();
logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
throw new RpcException(msg, e);
}
} else {
try (JedisCluster jedisCluster = new JedisCluster(
jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) {

View File

@ -23,15 +23,27 @@ dubbo:
address: redis://${spring.data.redis.host}:${spring.data.redis.port}
group: DUBBO_GROUP
username: dubbo
password: ${spring.data.redis.password}
password: ruoyi123
# 集群开关
cluster: false
sentinel: true
parameters:
namespace: ${spring.profiles.active}
database: ${spring.data.redis.database}
timeout: ${spring.data.redis.timeout}
# 集群地址 cluster 为 true 生效
backup: 127.0.0.1:6379,127.0.0.1:6381
backup: 53.16.17.13:26380,53.16.17.14:26380,53.16.17.16:26380
# metadata-report:
# address: redis://${spring.data.redis.host}:${spring.data.redis.port}
# group: DUBBO_GROUP
# username: dubbo
# password: ${spring.data.redis.password}
# # 集群开关
# cluster: false
# parameters:
# namespace: ${spring.profiles.active}
# database: ${spring.data.redis.database}
# timeout: ${spring.data.redis.timeout}
# # 集群地址 cluster 为 true 生效
# backup: 127.0.0.1:6379,127.0.0.1:6381
# 消费者相关配置
consumer:
# 结果缓存(LRU算法)
@ -43,3 +55,12 @@ dubbo:
retries: 0
# 初始化检查
check: false
logging:
level:
# 设置 Dubbo 核心包的日志级别为 DEBUG
org.apache.dubbo: DEBUG
# 如果需要更细粒度的调试,可指定元数据报告模块
org.apache.dubbo.metadata: DEBUG
# Redis 客户端日志(可选)
io.lettuce.core: WARN # 避免 Redis 连接日志过多

View File

@ -38,12 +38,12 @@ public enum DataScopeType {
/**
*
*/
DEPT("3", " #{#deptName} = #{#user.deptId} ", " 1 = 0 "),
DEPT("3", " #{#deptName} = #{#user.manageDeptId} ", " 1 = 0 "),
/**
*
*/
DEPT_AND_CHILD("4", " #{#deptName} IN ( #{@sdss.getDeptAndChild( #user.deptId )} )", " 1 = 0 "),
DEPT_AND_CHILD("4", " #{#deptName} IN ( #{@sdss.getDeptAndChild( #user.manageDeptId )} )", " 1 = 0 "),
/**
*

View File

@ -35,6 +35,8 @@ public class LoginHelper {
public static final String USER_KEY = "userId";
public static final String USER_NAME_KEY = "userName";
public static final String DEPT_KEY = "deptId";
public static final String MANAGE_DEPT__KEY = "manageDeptId";
public static final String DEPT_NAME_KEY = "deptName";
public static final String DEPT_CATEGORY_KEY = "deptCategory";
public static final String CLIENT_KEY = "clientid";
@ -53,6 +55,7 @@ public class LoginHelper {
.setExtra(USER_KEY, loginUser.getUserId())
.setExtra(USER_NAME_KEY, loginUser.getUsername())
.setExtra(DEPT_KEY, loginUser.getDeptId())
.setExtra(MANAGE_DEPT__KEY,loginUser.getManageDeptId())
.setExtra(DEPT_NAME_KEY, loginUser.getDeptName())
.setExtra(DEPT_CATEGORY_KEY, loginUser.getDeptCategory())
);

View File

@ -131,6 +131,13 @@
<artifactId>elasticsearch-rest-client</artifactId>
<version>7.14.0</version>
</dependency>
<dependency>
<groupId>org.elasticsearch.client</groupId>
<artifactId>elasticsearch-rest-client</artifactId>
<version>7.6.0-hw-ei-302002</version>
</dependency>
<dependency>
<groupId>org.elasticsearch.client</groupId>
<artifactId>elasticsearch-rest-high-level-client</artifactId>
@ -147,6 +154,12 @@
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>2.4.0-hw-ei-302002</version>
</dependency>
<!-- kafka -->
<dependency>

View File

@ -20,10 +20,11 @@ import java.util.List;
/**
* restHighLevelClient
*/
@Slf4j
/*@Slf4j
@Data
@Configuration
@ConfigurationProperties(prefix = "elasticsearch")
@ConfigurationProperties(prefix = "elasticsearch")*/
public class ElasticsearchConfig {
// es host ip 地址(集群)
@ -85,7 +86,7 @@ public class ElasticsearchConfig {
});
restHighLevelClient = new RestHighLevelClient(builder);
} catch (NumberFormatException e) {
log.error("ES 连接池初始化异常");
// log.error("ES 连接池初始化异常");
}
return restHighLevelClient;
}

View File

@ -0,0 +1,40 @@
package org.dromara.data2es.config;
import org.dromara.data2es.util.GenerateEnumUtil;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.hwclient.HwRestClient;
import org.springframework.context.annotation.Bean;
import org.springframework.stereotype.Component;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
/**
* <p>description: </p>
*
* @author chenle
* @date 2021-07-05 18:22
*/
/**
 * Elasticsearch client configuration backed by the Huawei FusionInsight
 * secured REST client ({@code HwRestClient}), which performs Kerberos
 * authentication from config files found under {@code configPath}.
 */
@Component(value = "esConfig")
public class EsConfig {

    // Index name prefix; daily indices are named gpsinfo<yyyyMMdd>.
    private String prefix = "gpsinfo";

    // DateTimeFormatter is immutable and thread-safe; build it once instead of
    // re-parsing the pattern on every call.
    private static final DateTimeFormatter DAY_FORMAT = DateTimeFormatter.ofPattern("yyyyMMdd");

    /**
     * @return today's index name, e.g. {@code gpsinfo20250708}
     */
    public String indexNameByDay() {
        return prefix + LocalDate.now().format(DAY_FORMAT);
    }

    /**
     * Builds the Huawei-authenticated {@link RestHighLevelClient} bean.
     * The bean's {@code close} destroy-method releases the underlying
     * low-level REST client on context shutdown.
     *
     * @return a high-level client wrapping the Huawei secured REST client
     */
    @Bean(destroyMethod = "close", name = "restHighLevelClient")
    public RestHighLevelClient restClient() {
        // Directory holding krb5.conf / user.keytab and the HwRestClient config.
        // NOTE(review): hard-coded deployment path — confirm it matches the target host layout.
        String configPath = "/rsoft/";
        // Register the login-module names HwRestClient expects; the bundled
        // LoginUtil.Module enum lacks these constants, so they are injected at runtime.
        // KAFKA("KafkaClient"), ZOOKEEPER("Client");
        GenerateEnumUtil.addEnum(LoginUtil.Module.class, "KAFKA", "KafkaClient");
        GenerateEnumUtil.addEnum(LoginUtil.Module.class, "ZOOKEEPER", "Client");
        HwRestClient hwRestClient = new HwRestClient(configPath);
        return new RestHighLevelClient(hwRestClient.getRestClientBuilder());
    }
}

View File

@ -24,11 +24,11 @@ public class KafkaConfig {
private Logger logger = LoggerFactory.getLogger(KafkaConfig.class);
// private String kafkaServers = "140.168.2.31:21007,140.168.2.32:21007,140.168.2.33:21007";
private String kafkaServers = "140.168.2.31:21007,140.168.2.32:21007,140.168.2.33:21007"; //省厅 kafka
// private String kafkaServers = "53.208.61.105:6667,53.208.61.106:6667,53.208.61.107:6667";//六安GA网
// private String kafkaServers = "34.72.62.93:9092";//六安视频网
// private String kafkaServers = "127.0.0.1:9092";//本地
private String kafkaServers = "53.207.8.71:9092,53.193.3.15:9092,53.160.0.237:9092,53.104.56.58:9092,53.128.22.61:9092";//省厅 马伟提供
// private String kafkaServers = "53.207.8.71:9092,53.193.3.15:9092,53.160.0.237:9092,53.104.56.58:9092,53.128.22.61:9092";//省厅 马伟提供
private String groupId = "ruansiProducer";
@ -128,11 +128,11 @@ public class KafkaConfig {
// props.put(kerberosDomainName, "hadoop.hadoop.com");
//设置自定义的分区策略类默认不传key是粘性分区尽量往一个分区中发消息。如果key不为null则默认是按照key的hashcode与 partition的取余来决定哪个partition
//props.put("partitioner.class","com.kafka.myparitioner.CidPartitioner");
props.put(securityProtocol, "SASL_PLAINTEXT");
props.put("sasl.jaas.config", "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"zkxc\" password=\"zkxcKafka07252023\";");
props.put("sasl.mechanism", "SCRAM-SHA-256");
KafkaProducer<String, String> producer = new KafkaProducer<>(props);
// KafkaProducer producer = new KafkaProducer<>(props);
// props.put(securityProtocol, "SASL_PLAINTEXT");
// props.put("sasl.jaas.config", "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"zkxc\" password=\"zkxcKafka07252023\";");
// props.put("sasl.mechanism", "SCRAM-SHA-256");
// KafkaProducer<String, String> producer = new KafkaProducer<>(props);
KafkaProducer producer = new KafkaProducer<>(props);
return producer;
}

View File

@ -25,7 +25,7 @@ public class KafkaSecurityUtil {
/**
*
*/
private static final String USER_PRINCIPAL = "aqdsj_ruansi@HADOOP.COM";
private static final String USER_PRINCIPAL = "yhy_ahrs_rcw@HADOOP.COM";
public static void securityPrepare() throws IOException
{
@ -34,10 +34,10 @@ public class KafkaSecurityUtil {
//String krbFile = filePath + "krb5.conf";
//ClassPathResource classPathResource = new ClassPathResource("krb5.conf");
//String krbFile = classPathResource.getAbsolutePath();
String krbFile = "/gpsstore/krb5.conf";
String krbFile = "/rsoft/krb5.conf";
// String userKeyTableFile = filePath + USER_KEYTAB_FILE;
//ClassPathResource classPathResource1 = new ClassPathResource(USER_KEYTAB_FILE);
String userKeyTableFile = "/gpsstore/user.keytab";
String userKeyTableFile = "/rsoft/user.keytab";
//windows路径下分隔符替换
userKeyTableFile = userKeyTableFile.replace("\\", "\\\\");

View File

@ -0,0 +1,146 @@
package org.dromara.data2es.util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.*;
import java.util.*;
/**
* <p>description: </p>
*
* @author luya
* @date 2025-07-08
*/
/**
 * Reflection helper that injects new constants into an existing enum class at
 * runtime (used here to extend third-party enums such as LoginUtil.Module with
 * KAFKA/ZOOKEEPER entries).
 *
 * <p>NOTE(review): this relies on JDK internals. {@code Field.class.getDeclaredField("modifiers")}
 * throws {@code NoSuchFieldException} on JDK 12 and later, so the whole utility
 * only works on older JDKs — confirm the target runtime version.</p>
 *
 * @author luya
 * @date 2025-07-08
 */
public class GenerateEnumUtil {
static Logger log = LoggerFactory.getLogger(GenerateEnumUtil.class);
/**
 * Appends a constant named {@code enumName} to {@code enumClass}.
 * Logs a warning and returns without changes if the constant already exists.
 *
 * @param enumClass the enum type to extend
 * @param enumName  name of the new constant
 * @param params    values for the enum's non-synthetic instance fields, in
 *                  declaration order (matched against the enum constructor)
 */
public static <T extends Enum<?>> void addEnum(Class<T> enumClass, String enumName, Object... params) {
sanityChecks(enumClass, enumName);
Field valuesField = null;
Field[] fields = enumClass.getDeclaredFields();
List<Class<?>> paramTypes = new LinkedList<>();
for (Field field : fields) {
// Constant with this name already present: nothing to do.
if (field.isEnumConstant() && field.getName().equals(enumName)) {
log.warn("该枚举类已经存在!");
return;
}
// The compiler-generated $VALUES array that backs values().
if (field.isSynthetic() && field.getName().contains("$VALUES")) {
valuesField = field;
}
// Remaining instance fields define the constructor parameter types.
if (!field.isSynthetic() && !field.isEnumConstant()) {
paramTypes.add(field.getType());
}
}
if (valuesField == null) {
throw new RuntimeException("未获取到合成类型");
}
try {
// Make the synthetic $VALUES field accessible.
valuesField.setAccessible(true);
T[] previousValues = (T[]) valuesField.get(null);
List<T> values = new ArrayList<>(Arrays.asList(previousValues));
// Create the new enum instance with the next ordinal.
T newValue = makeEnum(enumClass, enumName, values.size(), paramTypes.toArray(new Class[0]), params);
values.add(newValue);
// Overwrite $VALUES with the extended array.
setFailsafeFieldValue(valuesField, null, values.toArray((T[]) Array.newInstance(enumClass, 0)));
// Clear Class' cached enum lookup tables so values()/valueOf() see the new constant.
cleanEnumCache(enumClass);
} catch (Exception e) {
throw new RuntimeException("添加枚举失败: " + e.getMessage(), e);
}
}
/**
 * Validates that {@code enumClass} is really an enum and the name is non-blank.
 */
private static <T extends Enum<?>> void sanityChecks(Class<T> enumClass, String enumName) {
if (!Enum.class.isAssignableFrom(enumClass)) {
throw new RuntimeException(enumClass + " 不是一个枚举类。");
}
if (enumName == null || enumName.trim().isEmpty()) {
throw new RuntimeException("枚举名称不能为空");
}
}
/**
 * Instantiates a new enum constant via the enum's declared constructor.
 * Every enum constructor implicitly starts with (String name, int ordinal).
 */
private static <T> T makeEnum(Class<T> enumClass, String enumName, int ordinal,
Class<?>[] additionalTypes, Object[] additionalValues) throws Exception {
Class<?>[] paramTypes = new Class[additionalTypes.length + 2];
paramTypes[0] = String.class;
paramTypes[1] = int.class;
System.arraycopy(additionalTypes, 0, paramTypes, 2, additionalTypes.length);
// Locate and invoke the matching constructor.
Constructor<T> constructor = enumClass.getDeclaredConstructor(paramTypes);
constructor.setAccessible(true);
Object[] params = new Object[additionalValues.length + 2];
params[0] = enumName;
params[1] = ordinal;
System.arraycopy(additionalValues, 0, params, 2, additionalValues.length);
return constructor.newInstance(params);
}
/**
 * Writes {@code value} into a (possibly final) field by temporarily stripping
 * the FINAL modifier bit.
 *
 * <p>NOTE(review): breaks on JDK 12+ where Field's "modifiers" field is no
 * longer reflectively accessible — confirm target JDK.</p>
 */
private static void setFailsafeFieldValue(Field field, Object target, Object value)
throws NoSuchFieldException, IllegalAccessException {
field.setAccessible(true);
// Strip the final modifier so the field becomes writable.
Field modifiersField = Field.class.getDeclaredField("modifiers");
modifiersField.setAccessible(true);
int originalModifiers = field.getModifiers();
modifiersField.setInt(field, originalModifiers & ~Modifier.FINAL);
// Write the new value.
field.set(target, value);
// Restore the original modifiers (optional).
modifiersField.setInt(field, originalModifiers);
}
/**
 * Clears the JDK's cached enum metadata on {@code enumClass}.
 */
private static void cleanEnumCache(Class<?> enumClass)
throws NoSuchFieldException, IllegalAccessException {
blankField(enumClass, "enumConstantDirectory");
blankField(enumClass, "enumConstants");
}
/**
 * Nulls out the first field of {@link Class} whose name contains
 * {@code fieldName} (substring match tolerates JDK naming differences).
 */
private static void blankField(Class<?> enumClass, String fieldName)
throws NoSuchFieldException, IllegalAccessException {
for (Field field : Class.class.getDeclaredFields()) {
if (field.getName().contains(fieldName)) {
field.setAccessible(true);
setFailsafeFieldValue(field, enumClass, null);
break;
}
}
}
}

View File

@ -103,6 +103,8 @@ public class SysUser extends TenantEntity {
*/
private String remark;
private String manageDeptId;
public SysUser(Long userId) {
this.userId = userId;

View File

@ -108,6 +108,8 @@ public class SysUserBo extends BaseEntity {
*/
private String excludeUserIds;
private String manageDeptId;
public SysUserBo(Long userId) {
this.userId = userId;
}

View File

@ -113,6 +113,7 @@ public class SysUserVo implements Serializable {
*/
private Date createTime;
private String manageDeptId;
/**
*
*/

View File

@ -253,6 +253,7 @@ public class RemoteUserServiceImpl implements RemoteUserService {
loginUser.setTenantId(userVo.getTenantId());
loginUser.setUserId(userVo.getUserId());
loginUser.setDeptId(userVo.getDeptId());
loginUser.setManageDeptId(userVo.getManageDeptId());
loginUser.setUsername(userVo.getUserName());
loginUser.setNickname(userVo.getNickName());
loginUser.setPassword(userVo.getPassword());

View File

@ -40,7 +40,7 @@ spring.sql.init.platform=postgresql
db.num=1
### Connect URL of DB:
db.url.0=jdbc:postgresql://localhost:5432/ypc-config?tcpKeepAlive=true&reWriteBatchedInserts=true&ApplicationName=ruoyi-nacos
db.url.0=jdbc:postgresql://53.16.17.15:5432/ypc-config?tcpKeepAlive=true&reWriteBatchedInserts=true&ApplicationName=ruoyi-nacos
db.user.0=postgres
db.password.0=ycgis
db.pool.config.driverClassName=org.postgresql.Driver