省厅版本位置汇聚对接华为认证的kafka ES

stwzhj
luyya 2025-07-08 14:50:53 +08:00
parent 040885e507
commit 91a54e2d26
10 changed files with 142 additions and 7 deletions

View File

@ -130,6 +130,8 @@ public class LoginUser implements Serializable {
*/ */
private String deviceType; private String deviceType;
private String manageDeptId;
/** /**
* id * id
*/ */

View File

@ -49,10 +49,15 @@ import static org.apache.dubbo.metadata.report.support.Constants.DEFAULT_METADAT
public class RedisMetadataReport extends AbstractMetadataReport { public class RedisMetadataReport extends AbstractMetadataReport {
private static final String REDIS_DATABASE_KEY = "database"; private static final String REDIS_DATABASE_KEY = "database";
private static final String SENTINEL_KEY = "sentinel";
private static final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(RedisMetadataReport.class); private static final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(RedisMetadataReport.class);
// protected , for test // protected , for test
protected JedisPool pool; protected JedisPool pool;
protected JedisSentinelPool sentinelPool;
private Set<HostAndPort> jedisClusterNodes; private Set<HostAndPort> jedisClusterNodes;
private int timeout; private int timeout;
private String password; private String password;
@ -75,6 +80,14 @@ public class RedisMetadataReport extends AbstractMetadataReport {
for (URL tmpUrl : urls) { for (URL tmpUrl : urls) {
jedisClusterNodes.add(new HostAndPort(tmpUrl.getHost(), tmpUrl.getPort())); jedisClusterNodes.add(new HostAndPort(tmpUrl.getHost(), tmpUrl.getPort()));
} }
} else if (url.getParameter(SENTINEL_KEY,false)) {
Set<String> sentinels = new HashSet<>();
List<URL> urls = url.getBackupUrls();
for (URL tmpUrl : urls) {
sentinels.add(tmpUrl.getHost()+":"+ tmpUrl.getPort());
}
int database = url.getParameter(REDIS_DATABASE_KEY, 0);
sentinelPool = new JedisSentinelPool("mymaster",sentinels ,new GenericObjectPoolConfig<>(), timeout, password, database);
} else { } else {
int database = url.getParameter(REDIS_DATABASE_KEY, 0); int database = url.getParameter(REDIS_DATABASE_KEY, 0);
pool = new JedisPool(new JedisPoolConfig(), url.getHost(), url.getPort(), timeout, password, database); pool = new JedisPool(new JedisPoolConfig(), url.getHost(), url.getPort(), timeout, password, database);
@ -128,11 +141,25 @@ public class RedisMetadataReport extends AbstractMetadataReport {
private void storeMetadata(BaseMetadataIdentifier metadataIdentifier, String v) { private void storeMetadata(BaseMetadataIdentifier metadataIdentifier, String v) {
if (pool != null) { if (pool != null) {
storeMetadataStandalone(metadataIdentifier, v); storeMetadataStandalone(metadataIdentifier, v);
}else if(sentinelPool != null) {
storeMetadataInSentinel(metadataIdentifier, v);
} else { } else {
storeMetadataInCluster(metadataIdentifier, v); storeMetadataInCluster(metadataIdentifier, v);
} }
} }
/**
 * Stores a metadata entry through the Redis Sentinel connection pool.
 * Mirrors {@code storeMetadataStandalone}, but borrows the connection from
 * {@code sentinelPool} (master resolved by the sentinels) instead of {@code pool}.
 *
 * @param metadataIdentifier identifier whose unique key is used as the Redis key
 * @param v                  serialized metadata value to store
 * @throws RpcException wrapping any failure, after logging it
 */
private void storeMetadataInSentinel(BaseMetadataIdentifier metadataIdentifier, String v) {
    try (Jedis jedis = sentinelPool.getResource()) {
        // jedisParams carries the shared SET options used by the standalone path — TODO confirm it is initialized for sentinel mode too.
        jedis.set(metadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY), v, jedisParams);
    } catch (Throwable e) {
        // Fixed copy-paste defect: message previously said "redis cluster" on the sentinel path.
        String msg =
            "Failed to put " + metadataIdentifier + " to redis sentinel " + v + ", cause: " + e.getMessage();
        logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
        throw new RpcException(msg, e);
    }
}
private void storeMetadataInCluster(BaseMetadataIdentifier metadataIdentifier, String v) { private void storeMetadataInCluster(BaseMetadataIdentifier metadataIdentifier, String v) {
try (JedisCluster jedisCluster = try (JedisCluster jedisCluster =
new JedisCluster(jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) { new JedisCluster(jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) {
@ -158,11 +185,24 @@ public class RedisMetadataReport extends AbstractMetadataReport {
private void deleteMetadata(BaseMetadataIdentifier metadataIdentifier) { private void deleteMetadata(BaseMetadataIdentifier metadataIdentifier) {
if (pool != null) { if (pool != null) {
deleteMetadataStandalone(metadataIdentifier); deleteMetadataStandalone(metadataIdentifier);
}else if(sentinelPool != null) {
deleteMetadataSentinel(metadataIdentifier);
} else { } else {
deleteMetadataInCluster(metadataIdentifier); deleteMetadataInCluster(metadataIdentifier);
} }
} }
/**
 * Removes a metadata entry via a connection borrowed from the sentinel pool.
 *
 * @param metadataIdentifier identifier whose unique key names the entry to delete
 * @throws RpcException wrapping any failure, after logging it
 */
private void deleteMetadataSentinel(BaseMetadataIdentifier metadataIdentifier) {
    try (Jedis jedis = sentinelPool.getResource()) {
        String uniqueKey = metadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY);
        jedis.del(uniqueKey);
    } catch (Throwable e) {
        String msg = "Failed to delete " + metadataIdentifier + " from redis , cause: " + e.getMessage();
        logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
        throw new RpcException(msg, e);
    }
}
private void deleteMetadataInCluster(BaseMetadataIdentifier metadataIdentifier) { private void deleteMetadataInCluster(BaseMetadataIdentifier metadataIdentifier) {
try (JedisCluster jedisCluster = try (JedisCluster jedisCluster =
new JedisCluster(jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) { new JedisCluster(jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) {
@ -187,11 +227,24 @@ public class RedisMetadataReport extends AbstractMetadataReport {
private String getMetadata(BaseMetadataIdentifier metadataIdentifier) { private String getMetadata(BaseMetadataIdentifier metadataIdentifier) {
if (pool != null) { if (pool != null) {
return getMetadataStandalone(metadataIdentifier); return getMetadataStandalone(metadataIdentifier);
}else if(sentinelPool != null) {
return getMetadataSentinel(metadataIdentifier);
} else { } else {
return getMetadataInCluster(metadataIdentifier); return getMetadataInCluster(metadataIdentifier);
} }
} }
/**
 * Fetches the metadata payload stored under the identifier's unique key,
 * using a connection borrowed from the sentinel pool.
 *
 * @param metadataIdentifier identifier whose unique key is looked up
 * @return the stored value, or {@code null} when the key is absent
 * @throws RpcException wrapping any failure, after logging it
 */
private String getMetadataSentinel(BaseMetadataIdentifier metadataIdentifier) {
    try (Jedis jedis = sentinelPool.getResource()) {
        String uniqueKey = metadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY);
        return jedis.get(uniqueKey);
    } catch (Throwable e) {
        String msg = "Failed to get " + metadataIdentifier + " from redis , cause: " + e.getMessage();
        logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
        throw new RpcException(msg, e);
    }
}
private String getMetadataInCluster(BaseMetadataIdentifier metadataIdentifier) { private String getMetadataInCluster(BaseMetadataIdentifier metadataIdentifier) {
try (JedisCluster jedisCluster = try (JedisCluster jedisCluster =
new JedisCluster(jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) { new JedisCluster(jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) {
@ -243,6 +296,8 @@ public class RedisMetadataReport extends AbstractMetadataReport {
private boolean storeMapping(String key, String field, String value, String ticket) { private boolean storeMapping(String key, String field, String value, String ticket) {
if (pool != null) { if (pool != null) {
return storeMappingStandalone(key, field, value, ticket); return storeMappingStandalone(key, field, value, ticket);
}else if(sentinelPool != null) {
return storeMappingSentinel(key, field, value, ticket);
} else { } else {
return storeMappingInCluster(key, field, value, ticket); return storeMappingInCluster(key, field, value, ticket);
} }
@ -278,6 +333,33 @@ public class RedisMetadataReport extends AbstractMetadataReport {
return false; return false;
} }
/**
 * Uses WATCH/MULTI/EXEC to implement a compare-and-set write of the mapping
 * hash field over a sentinel-pooled connection.
 * (The previous javadoc mentioned "slot distribution by key", which applies
 * only to the cluster variant — sentinel mode has no slots.)
 */
private boolean storeMappingSentinel(String key, String field, String value, String ticket) {
    try (Jedis jedisSentinel = sentinelPool.getResource()) {
        // WATCH makes the following MULTI/EXEC abort if another client touches the key.
        jedisSentinel.watch(key);
        String oldValue = jedisSentinel.hget(key, field);
        // CAS precondition: field unset, no ticket supplied, or current value matches the ticket.
        if (null == oldValue || null == ticket || oldValue.equals(ticket)) {
            Transaction transaction = jedisSentinel.multi();
            transaction.hset(key, field, value);
            List<Object> result = transaction.exec();
            // exec() returns null when the watched key changed and the transaction was discarded.
            if (null != result) {
                // Notify subscribers of the updated field.
                jedisSentinel.publish(buildPubSubKey(), field);
                return true;
            }
        }
        // Precondition failed (or transaction aborted): release the WATCH before returning the connection.
        jedisSentinel.unwatch();
    } catch (Throwable e) {
        String msg = "Failed to put " + key + ":" + field + " to redis " + value + ", cause: " + e.getMessage();
        logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
        throw new RpcException(msg, e);
    }
    // False when the CAS lost the race or the ticket did not match.
    return false;
}
/** /**
* use 'watch' to implement cas. * use 'watch' to implement cas.
* Find information about slot distribution by key. * Find information about slot distribution by key.
@ -339,6 +421,8 @@ public class RedisMetadataReport extends AbstractMetadataReport {
private String getMappingData(String key, String field) { private String getMappingData(String key, String field) {
if (pool != null) { if (pool != null) {
return getMappingDataStandalone(key, field); return getMappingDataStandalone(key, field);
}else if(sentinelPool != null) {
return getMappingDataSentinel(key, field);
} else { } else {
return getMappingDataInCluster(key, field); return getMappingDataInCluster(key, field);
} }
@ -355,6 +439,17 @@ public class RedisMetadataReport extends AbstractMetadataReport {
} }
} }
/**
 * Reads one field of the mapping hash via the sentinel pool.
 *
 * @param key   Redis hash key
 * @param field hash field to read
 * @return the field's value, or {@code null} when absent
 * @throws RpcException wrapping any failure, after logging it
 */
private String getMappingDataSentinel(String key, String field) {
    try (Jedis jedis = sentinelPool.getResource()) {
        return jedis.hget(key, field);
    } catch (Throwable e) {
        String msg = "Failed to get " + key + ":" + field + " from redis , cause: " + e.getMessage();
        logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
        throw new RpcException(msg, e);
    }
}
private String getMappingDataStandalone(String key, String field) { private String getMappingDataStandalone(String key, String field) {
try (Jedis jedis = pool.getResource()) { try (Jedis jedis = pool.getResource()) {
return jedis.hget(key, field); return jedis.hget(key, field);
@ -502,6 +597,14 @@ public class RedisMetadataReport extends AbstractMetadataReport {
logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e); logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
throw new RpcException(msg, e); throw new RpcException(msg, e);
} }
} else if (sentinelPool != null) {
try (Jedis jedisSentinel = sentinelPool.getResource()) {
jedisSentinel.subscribe(notifySub, path);
} catch (Throwable e) {
String msg = "Failed to subscribe " + path + ", cause: " + e.getMessage();
logger.error(TRANSPORT_FAILED_RESPONSE, "", "", msg, e);
throw new RpcException(msg, e);
}
} else { } else {
try (JedisCluster jedisCluster = new JedisCluster( try (JedisCluster jedisCluster = new JedisCluster(
jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) { jedisClusterNodes, timeout, timeout, 2, password, new GenericObjectPoolConfig<>())) {

View File

@ -23,15 +23,27 @@ dubbo:
address: redis://${spring.data.redis.host}:${spring.data.redis.port} address: redis://${spring.data.redis.host}:${spring.data.redis.port}
group: DUBBO_GROUP group: DUBBO_GROUP
username: dubbo username: dubbo
password: ${spring.data.redis.password} password: ruoyi123
# 集群开关 # 哨兵(Sentinel)模式开关
cluster: false sentinel: true
parameters: parameters:
namespace: ${spring.profiles.active} namespace: ${spring.profiles.active}
database: ${spring.data.redis.database} database: ${spring.data.redis.database}
timeout: ${spring.data.redis.timeout} timeout: ${spring.data.redis.timeout}
# 集群地址 cluster 为 true 生效 backup: 53.16.17.13:26380,53.16.17.14:26380,53.16.17.16:26380
backup: 127.0.0.1:6379,127.0.0.1:6381 # metadata-report:
# address: redis://${spring.data.redis.host}:${spring.data.redis.port}
# group: DUBBO_GROUP
# username: dubbo
# password: ${spring.data.redis.password}
# # 集群开关
# cluster: false
# parameters:
# namespace: ${spring.profiles.active}
# database: ${spring.data.redis.database}
# timeout: ${spring.data.redis.timeout}
# # 集群地址 cluster 为 true 生效
# backup: 127.0.0.1:6379,127.0.0.1:6381
# 消费者相关配置 # 消费者相关配置
consumer: consumer:
# 结果缓存(LRU算法) # 结果缓存(LRU算法)
@ -43,3 +55,12 @@ dubbo:
retries: 0 retries: 0
# 初始化检查 # 初始化检查
check: false check: false
logging:
level:
# 设置 Dubbo 核心包的日志级别为 DEBUG
org.apache.dubbo: DEBUG
# 如果需要更细粒度的调试,可指定元数据报告模块
org.apache.dubbo.metadata: DEBUG
# Redis 客户端日志(可选)
io.lettuce.core: WARN # 避免 Redis 连接日志过多

View File

@ -38,12 +38,12 @@ public enum DataScopeType {
/** /**
* *
*/ */
DEPT("3", " #{#deptName} = #{#user.deptId} ", " 1 = 0 "), DEPT("3", " #{#deptName} = #{#user.manageDeptId} ", " 1 = 0 "),
/** /**
* *
*/ */
DEPT_AND_CHILD("4", " #{#deptName} IN ( #{@sdss.getDeptAndChild( #user.deptId )} )", " 1 = 0 "), DEPT_AND_CHILD("4", " #{#deptName} IN ( #{@sdss.getDeptAndChild( #user.manageDeptId )} )", " 1 = 0 "),
/** /**
* *

View File

@ -35,6 +35,8 @@ public class LoginHelper {
public static final String USER_KEY = "userId"; public static final String USER_KEY = "userId";
public static final String USER_NAME_KEY = "userName"; public static final String USER_NAME_KEY = "userName";
public static final String DEPT_KEY = "deptId"; public static final String DEPT_KEY = "deptId";
public static final String MANAGE_DEPT__KEY = "manageDeptId";
public static final String DEPT_NAME_KEY = "deptName"; public static final String DEPT_NAME_KEY = "deptName";
public static final String DEPT_CATEGORY_KEY = "deptCategory"; public static final String DEPT_CATEGORY_KEY = "deptCategory";
public static final String CLIENT_KEY = "clientid"; public static final String CLIENT_KEY = "clientid";
@ -53,6 +55,7 @@ public class LoginHelper {
.setExtra(USER_KEY, loginUser.getUserId()) .setExtra(USER_KEY, loginUser.getUserId())
.setExtra(USER_NAME_KEY, loginUser.getUsername()) .setExtra(USER_NAME_KEY, loginUser.getUsername())
.setExtra(DEPT_KEY, loginUser.getDeptId()) .setExtra(DEPT_KEY, loginUser.getDeptId())
.setExtra(MANAGE_DEPT__KEY,loginUser.getManageDeptId())
.setExtra(DEPT_NAME_KEY, loginUser.getDeptName()) .setExtra(DEPT_NAME_KEY, loginUser.getDeptName())
.setExtra(DEPT_CATEGORY_KEY, loginUser.getDeptCategory()) .setExtra(DEPT_CATEGORY_KEY, loginUser.getDeptCategory())
); );

View File

@ -103,6 +103,8 @@ public class SysUser extends TenantEntity {
*/ */
private String remark; private String remark;
private String manageDeptId;
public SysUser(Long userId) { public SysUser(Long userId) {
this.userId = userId; this.userId = userId;

View File

@ -108,6 +108,8 @@ public class SysUserBo extends BaseEntity {
*/ */
private String excludeUserIds; private String excludeUserIds;
private String manageDeptId;
public SysUserBo(Long userId) { public SysUserBo(Long userId) {
this.userId = userId; this.userId = userId;
} }

View File

@ -113,6 +113,7 @@ public class SysUserVo implements Serializable {
*/ */
private Date createTime; private Date createTime;
private String manageDeptId;
/** /**
* *
*/ */

View File

@ -253,6 +253,7 @@ public class RemoteUserServiceImpl implements RemoteUserService {
loginUser.setTenantId(userVo.getTenantId()); loginUser.setTenantId(userVo.getTenantId());
loginUser.setUserId(userVo.getUserId()); loginUser.setUserId(userVo.getUserId());
loginUser.setDeptId(userVo.getDeptId()); loginUser.setDeptId(userVo.getDeptId());
loginUser.setManageDeptId(userVo.getManageDeptId());
loginUser.setUsername(userVo.getUserName()); loginUser.setUsername(userVo.getUserName());
loginUser.setNickname(userVo.getNickName()); loginUser.setNickname(userVo.getNickName());
loginUser.setPassword(userVo.getPassword()); loginUser.setPassword(userVo.getPassword());

View File

@ -40,7 +40,7 @@ spring.sql.init.platform=postgresql
db.num=1 db.num=1
### Connect URL of DB: ### Connect URL of DB:
db.url.0=jdbc:postgresql://localhost:5432/ypc-config?tcpKeepAlive=true&reWriteBatchedInserts=true&ApplicationName=ruoyi-nacos db.url.0=jdbc:postgresql://53.16.17.15:5432/ypc-config?tcpKeepAlive=true&reWriteBatchedInserts=true&ApplicationName=ruoyi-nacos
db.user.0=postgres db.user.0=postgres
db.password.0=ycgis db.password.0=ycgis
db.pool.config.driverClassName=org.postgresql.Driver db.pool.config.driverClassName=org.postgresql.Driver