Compare commits
2 Commits
5f9ecaa366
...
1f906ebb39
| Author | SHA1 | Date |
|---|---|---|
|
|
1f906ebb39 | |
|
|
caf0ee6c5b |
|
|
@ -16,6 +16,7 @@
|
||||||
<module>stwzhj-workflow</module>
|
<module>stwzhj-workflow</module>
|
||||||
<module>stwzhj-data2es</module>
|
<module>stwzhj-data2es</module>
|
||||||
<module>stwzhj-baseToSt</module>
|
<module>stwzhj-baseToSt</module>
|
||||||
|
<module>stwzhj-data2StKafka</module>
|
||||||
</modules>
|
</modules>
|
||||||
|
|
||||||
<artifactId>stwzhj-modules</artifactId>
|
<artifactId>stwzhj-modules</artifactId>
|
||||||
|
|
|
||||||
|
|
@ -96,11 +96,6 @@
|
||||||
<artifactId>stwzhj-api-data2es</artifactId>
|
<artifactId>stwzhj-api-data2es</artifactId>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.apache.kafka</groupId>
|
|
||||||
<artifactId>kafka-clients</artifactId>
|
|
||||||
<version>2.4.0-hw-ei-302002</version>
|
|
||||||
</dependency>
|
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.ruansee.app</groupId>
|
<groupId>com.ruansee.app</groupId>
|
||||||
|
|
@ -137,16 +132,48 @@
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.springframework.kafka</groupId>
|
<groupId>org.apache.kafka</groupId>
|
||||||
<artifactId>spring-kafka</artifactId>
|
<artifactId>kafka_2.12</artifactId>
|
||||||
|
<version>3.6.1-h0.cbu.mrs.350.r11</version>
|
||||||
<exclusions>
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.zookeeper</groupId>
|
||||||
|
<artifactId>zookeeper</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>net.sf.jopt-simple</groupId>
|
||||||
|
<artifactId>jopt-simple</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.huawei.mrs</groupId>
|
||||||
|
<artifactId>manager-wc2frm</artifactId>
|
||||||
|
</exclusion>
|
||||||
<exclusion>
|
<exclusion>
|
||||||
<groupId>org.apache.kafka</groupId>
|
<groupId>org.apache.kafka</groupId>
|
||||||
<artifactId>kafka-clients</artifactId>
|
<artifactId>kafka-clients</artifactId>
|
||||||
</exclusion>
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.xerial.snappy</groupId>
|
||||||
|
<artifactId>snappy-java</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.huawei.mrs</groupId>
|
||||||
|
<artifactId>om-controller-api</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.101tec</groupId>
|
||||||
|
<artifactId>zkclient</artifactId>
|
||||||
|
</exclusion>
|
||||||
</exclusions>
|
</exclusions>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.kafka</groupId>
|
||||||
|
<artifactId>kafka-clients</artifactId>
|
||||||
|
<version>3.6.1-h0.cbu.mrs.350.r11</version>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
</dependencies>
|
</dependencies>
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -4,7 +4,6 @@ import com.ruansee.redis.JedisConfig;
|
||||||
import com.ruansee.redis.RedisConfig;
|
import com.ruansee.redis.RedisConfig;
|
||||||
import com.ruansee.redis.RedisUtil;
|
import com.ruansee.redis.RedisUtil;
|
||||||
import com.ruansee.redis.RedissionLockUtil;
|
import com.ruansee.redis.RedissionLockUtil;
|
||||||
import org.dromara.kafka.consumer.config.KafkaPropertiesConfig;
|
|
||||||
import org.redisson.spring.starter.RedissonAutoConfiguration;
|
import org.redisson.spring.starter.RedissonAutoConfiguration;
|
||||||
import org.springframework.boot.SpringApplication;
|
import org.springframework.boot.SpringApplication;
|
||||||
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||||
|
|
@ -25,7 +24,6 @@ import org.springframework.scheduling.annotation.EnableAsync;
|
||||||
*/
|
*/
|
||||||
@SpringBootApplication
|
@SpringBootApplication
|
||||||
@EnableAsync
|
@EnableAsync
|
||||||
@EnableConfigurationProperties({KafkaPropertiesConfig.class})
|
|
||||||
@ServletComponentScan
|
@ServletComponentScan
|
||||||
public class KafkaConsumerApplication {
|
public class KafkaConsumerApplication {
|
||||||
public static void main(String[] args){
|
public static void main(String[] args){
|
||||||
|
|
|
||||||
|
|
@ -1,138 +0,0 @@
|
||||||
package org.dromara.kafka.consumer.config;
|
|
||||||
|
|
||||||
import org.apache.kafka.clients.producer.KafkaProducer;
|
|
||||||
import org.slf4j.Logger;
|
|
||||||
import org.slf4j.LoggerFactory;
|
|
||||||
import org.springframework.context.annotation.Bean;
|
|
||||||
import org.springframework.stereotype.Component;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.util.Properties;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* <p>description: </p>
|
|
||||||
*
|
|
||||||
* @author chenle
|
|
||||||
* @date 2021-11-03 14:15
|
|
||||||
*/
|
|
||||||
@Component
|
|
||||||
public class KafkaConfig {
|
|
||||||
|
|
||||||
private Logger logger = LoggerFactory.getLogger(KafkaConfig.class);
|
|
||||||
|
|
||||||
private String kafkaServers = "53.1.212.25:21007,53.1.212.26:21007,53.1.212.27:21007"; //省厅 kafka
|
|
||||||
// private String kafkaServers = "53.208.61.105:6667,53.208.61.106:6667,53.208.61.107:6667";//六安GA网
|
|
||||||
// private String kafkaServers = "34.72.62.93:9092";//六安视频网
|
|
||||||
// private String kafkaServers = "127.0.0.1:9092";//本地
|
|
||||||
// private String kafkaServers = "53.207.8.71:9092,53.193.3.15:9092,53.160.0.237:9092,53.104.56.58:9092,53.128.22.61:9092";//省厅 马伟提供
|
|
||||||
|
|
||||||
private String groupId = "ruansiProducer";
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
// Broker地址列表
|
|
||||||
private final String bootstrapServers = "bootstrap.servers";
|
|
||||||
|
|
||||||
// 客户端ID
|
|
||||||
private final String clientId = "client.id";
|
|
||||||
|
|
||||||
// Key序列化类
|
|
||||||
private final String keySerializer = "key.serializer";
|
|
||||||
|
|
||||||
// Value序列化类
|
|
||||||
private final String valueSerializer = "value.serializer";
|
|
||||||
|
|
||||||
// 协议类型:当前支持配置为SASL_PLAINTEXT或者PLAINTEXT
|
|
||||||
private final String securityProtocol = "security.protocol";
|
|
||||||
|
|
||||||
// 服务名
|
|
||||||
private final String saslKerberosServiceName = "sasl.kerberos.service.name";
|
|
||||||
|
|
||||||
// 域名
|
|
||||||
private final String kerberosDomainName = "kerberos.domain.name";
|
|
||||||
|
|
||||||
//默认发送20条消息
|
|
||||||
private final int messageNumToSend = 100;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* 用户自己申请的机机账号keytab文件名称
|
|
||||||
*/
|
|
||||||
private static final String USER_KEYTAB_FILE = "user.keytab";
|
|
||||||
|
|
||||||
/**
|
|
||||||
* 用户自己申请的机机账号名称
|
|
||||||
*/
|
|
||||||
private static final String USER_PRINCIPAL = "yhy_ahrs_rcw@A528C942_01A6_1BEF_7A75_0187DC82C40F.COM";
|
|
||||||
|
|
||||||
/**
|
|
||||||
* 新Producer 构造函数
|
|
||||||
* @param
|
|
||||||
* @param
|
|
||||||
*/
|
|
||||||
|
|
||||||
@Bean(name = "myKafkaProducer")
|
|
||||||
public KafkaProducer newProducer() {
|
|
||||||
Properties props = new Properties();
|
|
||||||
|
|
||||||
if (true)
|
|
||||||
{
|
|
||||||
try
|
|
||||||
{
|
|
||||||
logger.info("Securitymode start.");
|
|
||||||
//!!注意,安全认证时,需要用户手动修改为自己申请的机机账号
|
|
||||||
LoginUtil.securityPrepare(USER_PRINCIPAL, USER_KEYTAB_FILE);
|
|
||||||
props.put(securityProtocol, "SASL_PLAINTEXT");
|
|
||||||
// props.put("sasl.mechanism", "GSSAPI");
|
|
||||||
// 服务名
|
|
||||||
props.put(saslKerberosServiceName, "kafka");
|
|
||||||
// 域名
|
|
||||||
props.put(kerberosDomainName, "A528C942_01A6_1BEF_7A75_0187DC82C40F.COM");
|
|
||||||
}
|
|
||||||
catch (IOException e)
|
|
||||||
{
|
|
||||||
logger.error("Security prepare failure.");
|
|
||||||
logger.error("The IOException occured.", e);
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
logger.info("Security prepare success.");
|
|
||||||
}else{
|
|
||||||
props.put(securityProtocol, "PLAINTEXT");
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
// Broker地址列表
|
|
||||||
props.put(bootstrapServers,kafkaServers);
|
|
||||||
// 客户端ID
|
|
||||||
props.put(clientId, "ruansiProducer");
|
|
||||||
// Key序列化类
|
|
||||||
props.put(keySerializer,
|
|
||||||
"org.apache.kafka.common.serialization.IntegerSerializer");
|
|
||||||
// Value序列化类
|
|
||||||
props.put(valueSerializer,
|
|
||||||
"org.apache.kafka.common.serialization.StringSerializer");
|
|
||||||
//批量发送信息配置
|
|
||||||
props.put("batch.size", 16384);
|
|
||||||
props.put("linger.ms", 1);
|
|
||||||
props.put("buffer.memory", 33554432);
|
|
||||||
// 协议类型:当前支持配置为SASL_PLAINTEXT或者PLAINTEXT
|
|
||||||
//props.put(securityProtocol, "SASL_PLAINTEXT");
|
|
||||||
// // 服务名
|
|
||||||
// props.put(saslKerberosServiceName, "kafka");
|
|
||||||
// // 域名
|
|
||||||
// props.put(kerberosDomainName, "hadoop.hadoop.com");
|
|
||||||
//设置自定义的分区策略类,默认不传key,是粘性分区,尽量往一个分区中发消息。如果key不为null,则默认是按照key的hashcode与 partition的取余来决定哪个partition
|
|
||||||
//props.put("partitioner.class","com.kafka.myparitioner.CidPartitioner");
|
|
||||||
// props.put(securityProtocol, "SASL_PLAINTEXT");
|
|
||||||
// props.put("sasl.jaas.config", "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"zkxc\" password=\"zkxcKafka07252023\";");
|
|
||||||
// props.put("sasl.mechanism", "SCRAM-SHA-256");
|
|
||||||
// KafkaProducer<String, String> producer = new KafkaProducer<>(props);
|
|
||||||
KafkaProducer producer = new KafkaProducer<>(props);
|
|
||||||
|
|
||||||
return producer;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
@ -13,9 +13,10 @@ public final class KafkaProperties
|
||||||
private static final Logger LOG = LoggerFactory.getLogger(KafkaProperties.class);
|
private static final Logger LOG = LoggerFactory.getLogger(KafkaProperties.class);
|
||||||
|
|
||||||
// Topic名称,安全模式下,需要以管理员用户添加当前用户的访问权限
|
// Topic名称,安全模式下,需要以管理员用户添加当前用户的访问权限
|
||||||
public final static String TOPIC = "t_gps_realtime";
|
public final static String TOPIC = "jysb_dwxx";
|
||||||
|
|
||||||
private static Properties serverProps = new Properties();
|
private static Properties serverProps = new Properties();
|
||||||
|
|
||||||
private static Properties producerProps = new Properties();
|
private static Properties producerProps = new Properties();
|
||||||
|
|
||||||
private static Properties consumerProps = new Properties();
|
private static Properties consumerProps = new Properties();
|
||||||
|
|
@ -26,8 +27,9 @@ public final class KafkaProperties
|
||||||
|
|
||||||
private KafkaProperties()
|
private KafkaProperties()
|
||||||
{
|
{
|
||||||
String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
|
// String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
|
||||||
|
String filePath = "/home/rsoft/config/";
|
||||||
|
LOG.info("路径=={}",filePath);
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
File proFile = new File(filePath + "producer.properties");
|
File proFile = new File(filePath + "producer.properties");
|
||||||
|
|
@ -48,14 +50,14 @@ public final class KafkaProperties
|
||||||
|
|
||||||
if (serFile.exists())
|
if (serFile.exists())
|
||||||
{
|
{
|
||||||
serverProps.load(new FileInputStream(filePath + "server.properties"));
|
serverProps.load(new FileInputStream(filePath + "server.properties"));
|
||||||
}
|
}
|
||||||
|
|
||||||
File cliFile = new File(filePath + "client.properties");
|
File cliFile = new File(filePath + "client.properties");
|
||||||
|
|
||||||
if (cliFile.exists())
|
if (cliFile.exists())
|
||||||
{
|
{
|
||||||
clientProps.load(new FileInputStream(filePath + "client.properties"));
|
clientProps.load(new FileInputStream(filePath + "client.properties"));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
catch (IOException e)
|
catch (IOException e)
|
||||||
|
|
@ -75,11 +77,11 @@ public final class KafkaProperties
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* 获取参数值
|
* 获取参数值
|
||||||
* @param key properites的key值
|
* @param key properites的key值
|
||||||
* @param defValue 默认值
|
* @param defValue 默认值
|
||||||
* @return
|
* @return
|
||||||
*/
|
*/
|
||||||
public String getValues(String key, String defValue)
|
public String getValues(String key, String defValue)
|
||||||
{
|
{
|
||||||
String rtValue = null;
|
String rtValue = null;
|
||||||
|
|
@ -105,10 +107,10 @@ public final class KafkaProperties
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* 根据key值获取server.properties的值
|
* 根据key值获取server.properties的值
|
||||||
* @param key
|
* @param key
|
||||||
* @return
|
* @return
|
||||||
*/
|
*/
|
||||||
private String getPropertiesValue(String key)
|
private String getPropertiesValue(String key)
|
||||||
{
|
{
|
||||||
String rtValue = serverProps.getProperty(key);
|
String rtValue = serverProps.getProperty(key);
|
||||||
|
|
@ -128,7 +130,7 @@ public final class KafkaProperties
|
||||||
// consumer没有,则再向client.properties中获取
|
// consumer没有,则再向client.properties中获取
|
||||||
if (null == rtValue)
|
if (null == rtValue)
|
||||||
{
|
{
|
||||||
rtValue = clientProps.getProperty(key);
|
rtValue = clientProps.getProperty(key);
|
||||||
}
|
}
|
||||||
|
|
||||||
return rtValue;
|
return rtValue;
|
||||||
|
|
|
||||||
|
|
@ -1,35 +0,0 @@
|
||||||
package org.dromara.kafka.consumer.config;
|
|
||||||
|
|
||||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
|
||||||
import org.springframework.context.annotation.Profile;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* <p>description: </p>
|
|
||||||
*
|
|
||||||
* @author chenle
|
|
||||||
* @date 2021-09-06 15:13
|
|
||||||
*/
|
|
||||||
@ConfigurationProperties(prefix = "mykafka")
|
|
||||||
@Profile(value = "dev")
|
|
||||||
public
|
|
||||||
class KafkaPropertiesConfig {
|
|
||||||
private String serverUrl;
|
|
||||||
|
|
||||||
private MyConsumerProperties consumerProperties = new MyConsumerProperties();
|
|
||||||
|
|
||||||
public String getServerUrl() {
|
|
||||||
return serverUrl;
|
|
||||||
}
|
|
||||||
|
|
||||||
public void setServerUrl(String serverUrl) {
|
|
||||||
this.serverUrl = serverUrl;
|
|
||||||
}
|
|
||||||
|
|
||||||
public MyConsumerProperties getConsumerProperties() {
|
|
||||||
return consumerProperties;
|
|
||||||
}
|
|
||||||
|
|
||||||
public void setConsumerProperties(MyConsumerProperties consumerProperties) {
|
|
||||||
this.consumerProperties = consumerProperties;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -205,7 +205,7 @@ public class LoginUtil {
|
||||||
|
|
||||||
public static void securityPrepare(String principal, String keyTabFile) throws IOException {
|
public static void securityPrepare(String principal, String keyTabFile) throws IOException {
|
||||||
// String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
|
// String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
|
||||||
String filePath = "/rsoft/config/";
|
String filePath = "/home/rsoft/config/";
|
||||||
String krbFile = filePath + "krb5.conf";
|
String krbFile = filePath + "krb5.conf";
|
||||||
String userKeyTableFile = filePath + keyTabFile;
|
String userKeyTableFile = filePath + keyTabFile;
|
||||||
|
|
||||||
|
|
@ -225,8 +225,8 @@ public class LoginUtil {
|
||||||
*/
|
*/
|
||||||
public static Boolean isSecurityModel() {
|
public static Boolean isSecurityModel() {
|
||||||
Boolean isSecurity = false;
|
Boolean isSecurity = false;
|
||||||
String krbFilePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator + "kafkaSecurityMode";
|
// String krbFilePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator + "kafkaSecurityMode";
|
||||||
|
String krbFilePath = "/home/rsoft/config/kafkaSecurityMode";
|
||||||
Properties securityProps = new Properties();
|
Properties securityProps = new Properties();
|
||||||
|
|
||||||
// file does not exist.
|
// file does not exist.
|
||||||
|
|
|
||||||
|
|
@ -1,28 +0,0 @@
|
||||||
package org.dromara.kafka.consumer.config;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* <p>description: </p>
|
|
||||||
*
|
|
||||||
* @author chenle
|
|
||||||
* @date 2021-09-07 14:54
|
|
||||||
*/
|
|
||||||
public class MyConsumerProperties {
|
|
||||||
private String clientId;
|
|
||||||
private String groupId = "222";
|
|
||||||
|
|
||||||
public String getClientId() {
|
|
||||||
return clientId;
|
|
||||||
}
|
|
||||||
|
|
||||||
public void setClientId(String clientId) {
|
|
||||||
this.clientId = clientId;
|
|
||||||
}
|
|
||||||
|
|
||||||
public String getGroupId() {
|
|
||||||
return groupId;
|
|
||||||
}
|
|
||||||
|
|
||||||
public void setGroupId(String groupId) {
|
|
||||||
this.groupId = groupId;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,159 +0,0 @@
|
||||||
package org.dromara.kafka.consumer.config;
|
|
||||||
|
|
||||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
|
||||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
|
||||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
|
||||||
import org.dromara.kafka.consumer.handler.KafkaSecurityUtil;
|
|
||||||
import org.slf4j.Logger;
|
|
||||||
import org.slf4j.LoggerFactory;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.util.Collections;
|
|
||||||
import java.util.Properties;
|
|
||||||
|
|
||||||
|
|
||||||
public class NewConsumer extends Thread{
|
|
||||||
private static final Logger LOG = LoggerFactory.getLogger(NewConsumer.class);
|
|
||||||
|
|
||||||
private final KafkaConsumer<Integer, String> consumer;
|
|
||||||
|
|
||||||
private final String topic;
|
|
||||||
|
|
||||||
// 一次请求的最大等待时间
|
|
||||||
private final int waitTime = 10000;
|
|
||||||
|
|
||||||
// Broker连接地址
|
|
||||||
private final String bootstrapServers = "bootstrap.servers";
|
|
||||||
// Group id
|
|
||||||
private final String groupId = "group.id";
|
|
||||||
// 消息内容使用的反序列化类
|
|
||||||
private final String valueDeserializer = "value.deserializer";
|
|
||||||
// 消息Key值使用的反序列化类
|
|
||||||
private final String keyDeserializer = "key.deserializer";
|
|
||||||
// 协议类型:当前支持配置为SASL_PLAINTEXT或者PLAINTEXT
|
|
||||||
private final String securityProtocol = "security.protocol";
|
|
||||||
// 服务名
|
|
||||||
private final String saslKerberosServiceName = "sasl.kerberos.service.name";
|
|
||||||
// 域名
|
|
||||||
private final String kerberosDomainName = "kerberos.domain.name";
|
|
||||||
// 是否自动提交offset
|
|
||||||
private final String enableAutoCommit = "enable.auto.commit";
|
|
||||||
// 自动提交offset的时间间隔
|
|
||||||
private final String autoCommitIntervalMs = "auto.commit.interval.ms";
|
|
||||||
|
|
||||||
// 会话超时时间
|
|
||||||
private final String sessionTimeoutMs = "session.timeout.ms";
|
|
||||||
|
|
||||||
/**
|
|
||||||
* 用户自己申请的机机账号keytab文件名称
|
|
||||||
*/
|
|
||||||
private static final String USER_KEYTAB_FILE = "user.keytab";
|
|
||||||
|
|
||||||
/**
|
|
||||||
* 用户自己申请的机机账号名称
|
|
||||||
*/
|
|
||||||
private static final String USER_PRINCIPAL = "aqdsj_ruansi";
|
|
||||||
|
|
||||||
/**
|
|
||||||
* NewConsumer构造函数
|
|
||||||
* @param topic 订阅的Topic名称
|
|
||||||
*/
|
|
||||||
public NewConsumer(String topic) {
|
|
||||||
|
|
||||||
Properties props = new Properties();
|
|
||||||
|
|
||||||
KafkaProperties kafkaProc = KafkaProperties.getInstance();
|
|
||||||
// Broker连接地址
|
|
||||||
props.put(bootstrapServers,
|
|
||||||
kafkaProc.getValues(bootstrapServers, "localhost:21007"));
|
|
||||||
// Group id
|
|
||||||
props.put(groupId, "DemoConsumer");
|
|
||||||
// 是否自动提交offset
|
|
||||||
props.put(enableAutoCommit, "true");
|
|
||||||
// 自动提交offset的时间间隔
|
|
||||||
props.put(autoCommitIntervalMs, "1000");
|
|
||||||
// 会话超时时间
|
|
||||||
props.put(sessionTimeoutMs, "30000");
|
|
||||||
// 消息Key值使用的反序列化类
|
|
||||||
props.put(keyDeserializer,
|
|
||||||
"org.apache.kafka.common.serialization.IntegerDeserializer");
|
|
||||||
// 消息内容使用的反序列化类
|
|
||||||
props.put(valueDeserializer,
|
|
||||||
"org.apache.kafka.common.serialization.StringDeserializer");
|
|
||||||
// 安全协议类型
|
|
||||||
props.put(securityProtocol, kafkaProc.getValues(securityProtocol, "SASL_PLAINTEXT"));
|
|
||||||
// 服务名
|
|
||||||
props.put(saslKerberosServiceName, "kafka");
|
|
||||||
// 域名
|
|
||||||
props.put(kerberosDomainName, kafkaProc.getValues(kerberosDomainName, "hadoop.hadoop.com"));
|
|
||||||
consumer = new KafkaConsumer<Integer, String>(props);
|
|
||||||
this.topic = topic;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* 订阅Topic的消息处理函数
|
|
||||||
*/
|
|
||||||
public void doWork()
|
|
||||||
{
|
|
||||||
// 订阅
|
|
||||||
consumer.subscribe(Collections.singletonList(this.topic));
|
|
||||||
// 消息消费请求
|
|
||||||
ConsumerRecords<Integer, String> records = consumer.poll(waitTime);
|
|
||||||
// 消息处理
|
|
||||||
for (ConsumerRecord<Integer, String> record : records)
|
|
||||||
{
|
|
||||||
LOG.info("[NewConsumerExample], Received message: (" + record.key() + ", " + record.value()
|
|
||||||
+ ") at offset " + record.offset());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
public static void main(String[] args)
|
|
||||||
{
|
|
||||||
if (KafkaSecurityUtil.isSecurityModel())
|
|
||||||
{
|
|
||||||
try
|
|
||||||
{
|
|
||||||
LOG.info("Securitymode start.");
|
|
||||||
|
|
||||||
//!!注意,安全认证时,需要用户手动修改为自己申请的机机账号
|
|
||||||
KafkaSecurityUtil.securityPrepare();
|
|
||||||
}
|
|
||||||
catch (IOException e)
|
|
||||||
{
|
|
||||||
LOG.error("Security prepare failure.");
|
|
||||||
LOG.error("The IOException occured : {}.", e);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
LOG.info("Security prepare success.");
|
|
||||||
}
|
|
||||||
|
|
||||||
NewConsumer consumerThread = new NewConsumer(KafkaProperties.TOPIC);
|
|
||||||
consumerThread.start();
|
|
||||||
|
|
||||||
// 等到60s后将consumer关闭,实际执行过程中可修改
|
|
||||||
try
|
|
||||||
{
|
|
||||||
Thread.sleep(60000);
|
|
||||||
}
|
|
||||||
catch (InterruptedException e)
|
|
||||||
{
|
|
||||||
LOG.info("The InterruptedException occured : {}.", e);
|
|
||||||
}
|
|
||||||
finally
|
|
||||||
{
|
|
||||||
consumerThread.shutdown();
|
|
||||||
consumerThread.consumer.close();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public synchronized void start() {
|
|
||||||
doWork();
|
|
||||||
}
|
|
||||||
|
|
||||||
private void shutdown(){
|
|
||||||
Thread.currentThread().interrupt();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,30 +0,0 @@
|
||||||
package org.dromara.kafka.consumer.filters;
|
|
||||||
|
|
||||||
import org.slf4j.Logger;
|
|
||||||
import org.slf4j.LoggerFactory;
|
|
||||||
|
|
||||||
import javax.servlet.*;
|
|
||||||
import javax.servlet.annotation.WebFilter;
|
|
||||||
import javax.servlet.http.HttpServletRequest;
|
|
||||||
import java.io.IOException;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* <p>description: </p>
|
|
||||||
*
|
|
||||||
* @author chenle
|
|
||||||
* @date 2021-09-08 15:40
|
|
||||||
*/
|
|
||||||
@WebFilter(filterName="MyFilter",urlPatterns = "/*")
|
|
||||||
public class MyFilter implements Filter {
|
|
||||||
|
|
||||||
private Logger logger = LoggerFactory.getLogger(MyFilter.class);
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain) throws IOException, ServletException {
|
|
||||||
HttpServletRequest request = (HttpServletRequest) servletRequest;
|
|
||||||
String queryString = request.getQueryString();
|
|
||||||
// logger.error("pre,queryString={}",queryString);
|
|
||||||
filterChain.doFilter(servletRequest,servletResponse);
|
|
||||||
// logger.error("queryString={}",queryString);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -27,7 +27,8 @@ import java.util.Objects;
|
||||||
import java.util.concurrent.LinkedBlockingDeque;
|
import java.util.concurrent.LinkedBlockingDeque;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* <p>description: </p>
|
* <p>description: 处理kafka数据并发送到data2es
|
||||||
|
* </p>
|
||||||
*
|
*
|
||||||
* @author chenle
|
* @author chenle
|
||||||
* @date 2021-09-06 16:44
|
* @date 2021-09-06 16:44
|
||||||
|
|
@ -36,78 +37,20 @@ public class ConsumerWorker implements Runnable {
|
||||||
private ConsumerRecord<String, Object> record;
|
private ConsumerRecord<String, Object> record;
|
||||||
private Logger logger = LoggerFactory.getLogger(ConsumerWorker.class);
|
private Logger logger = LoggerFactory.getLogger(ConsumerWorker.class);
|
||||||
|
|
||||||
public static LinkedBlockingDeque linkedBlockingDeque = new LinkedBlockingDeque<>(5000);
|
|
||||||
|
|
||||||
private String cityCode ;
|
public static LinkedBlockingDeque linkedBlockingDeque = new LinkedBlockingDeque<>(1000);
|
||||||
|
|
||||||
ConsumerWorker(ConsumerRecord<String, Object> record, String cityCode) {
|
|
||||||
|
ConsumerWorker(ConsumerRecord<String, Object> record) {
|
||||||
this.record = record;
|
this.record = record;
|
||||||
this.cityCode = cityCode;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void run() {
|
public void run() {
|
||||||
//其他地市使用的方法,这里使用了一个巧妙的方法,我们开发的地市都是传4位,这种其他地市的cityCode传大于4位,然后截取
|
//其他地市使用的方法,这里使用了一个巧妙的方法,我们开发的地市都是传4位,这种其他地市的cityCode传大于4位,然后截取
|
||||||
if(cityCode.length() > 4){
|
luanrequest();
|
||||||
cityCode = cityCode.substring(0,4);
|
|
||||||
normalRequest();
|
|
||||||
}else {
|
|
||||||
//六安、安庆等地市的方法,这些地市都是我们自己公司开发的东西。
|
|
||||||
luanrequest();
|
|
||||||
// luanrequestBatch();
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* 废弃方法
|
|
||||||
* */
|
|
||||||
private void luanrequestBatch() {
|
|
||||||
Object value = record.value();
|
|
||||||
String topic = record.topic();
|
|
||||||
List<EsGpsInfo> list = new ArrayList<>();
|
|
||||||
logger.info("offset={},topic={},value={}", record.offset(), topic,value);
|
|
||||||
List<JSONObject> jsonObjects = JSON.parseArray((String) value, JSONObject.class);
|
|
||||||
for (JSONObject jsonObject : jsonObjects) {
|
|
||||||
EsGpsInfo esGpsInfo;
|
|
||||||
/*try {
|
|
||||||
jsonObject = JSONUtil.parseObj(((String) value));
|
|
||||||
}catch (ConvertException e){
|
|
||||||
logger.info("jsonObject=null:error={}",e.getMessage());
|
|
||||||
return;
|
|
||||||
}*/
|
|
||||||
try {
|
|
||||||
esGpsInfo = JSONUtil.toBean(jsonObject, EsGpsInfo.class);
|
|
||||||
}catch (ConvertException e){
|
|
||||||
logger.info("EsGpsInfo=null:error={}",e.getMessage());
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if(Objects.isNull(esGpsInfo)){
|
|
||||||
logger.info("esGpsInfo=null no error");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
String deviceCode = esGpsInfo.getDeviceCode();
|
|
||||||
if(StringUtils.isEmpty(deviceCode) || deviceCode.length() > 100){
|
|
||||||
logger.info("deviceCode:{} is null or is too long ",deviceCode);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
String latitude = esGpsInfo.getLat();
|
|
||||||
if(StringUtils.isEmpty(latitude) || "0.0".equals(latitude)){
|
|
||||||
logger.info("latitude:{} is null or is zero ",latitude);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
String longitude = esGpsInfo.getLng();
|
|
||||||
if(StringUtils.isEmpty(longitude) || "0.0".equals(longitude)){
|
|
||||||
logger.info("longitude:{} is null or is zero ",longitude);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
esGpsInfo.setInfoSource(cityCode);
|
|
||||||
|
|
||||||
esGpsInfo.setGpsTime(new Date(Long.valueOf(jsonObject.getStr("gpsTime"))));
|
|
||||||
list.add(esGpsInfo);
|
|
||||||
}
|
|
||||||
// dataToEsService.saveGpsInfoBatch(list);
|
|
||||||
}
|
|
||||||
|
|
||||||
private void luanrequest() {
|
private void luanrequest() {
|
||||||
Object value = record.value();
|
Object value = record.value();
|
||||||
|
|
@ -148,7 +91,12 @@ public class ConsumerWorker implements Runnable {
|
||||||
logger.info("longitude:{} is null or is zero ",longitude);
|
logger.info("longitude:{} is null or is zero ",longitude);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
esGpsInfo.setInfoSource(cityCode);
|
String infoSource = esGpsInfo.getInfoSource();
|
||||||
|
if(StringUtils.isEmpty(infoSource) ){
|
||||||
|
logger.info("infoSource:{} is null ",infoSource);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
esGpsInfo.setGpsTime(new Date(Long.valueOf(jsonObject.getStr("gpsTime"))));
|
esGpsInfo.setGpsTime(new Date(Long.valueOf(jsonObject.getStr("gpsTime"))));
|
||||||
}catch (Exception e){
|
}catch (Exception e){
|
||||||
|
|
@ -169,66 +117,4 @@ public class ConsumerWorker implements Runnable {
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
|
||||||
* 通用的请求(一般地市采用这个方法)
|
|
||||||
*/
|
|
||||||
private void normalRequest() {
|
|
||||||
Object value = record.value();
|
|
||||||
String topic = record.topic();
|
|
||||||
|
|
||||||
logger.info("offset={},topic={},value={}", record.offset(), topic,value);
|
|
||||||
|
|
||||||
RemoteGpsInfo esGpsInfo = new RemoteGpsInfo();
|
|
||||||
EsGpsInfoVO esGpsInfoVO;
|
|
||||||
try {
|
|
||||||
esGpsInfoVO = JSONUtil.toBean(((String) value), EsGpsInfoVO.class);
|
|
||||||
}catch (ConvertException e){
|
|
||||||
logger.info("esGpsInfoVO=null:error={}",e.getMessage());
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if(Objects.isNull(esGpsInfoVO)){
|
|
||||||
logger.info("esGpsInfoVO=null no error");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
try {
|
|
||||||
DateTime parse = DateUtil.parse(esGpsInfoVO.getGpsTime(), "yyyy-MM-dd HH:mm:ss");
|
|
||||||
}catch (Exception e){
|
|
||||||
logger.info("gpsTime:{} format error", esGpsInfoVO.getGpsTime());
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
String deviceCode = esGpsInfoVO.getDeviceCode();
|
|
||||||
if(StringUtils.isEmpty(deviceCode) || deviceCode.length() > 100){
|
|
||||||
logger.info("deviceCode:{} is null or is too long ",deviceCode);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
String latitude = esGpsInfoVO.getLatitude();
|
|
||||||
if(StringUtils.isEmpty(latitude) || "0.0".equals(latitude)){
|
|
||||||
logger.info("latitude:{} is null or is zero ",latitude);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
String longitude = esGpsInfoVO.getLongitude();
|
|
||||||
if(StringUtils.isEmpty(longitude) || "0.0".equals(longitude)){
|
|
||||||
logger.info("longitude:{} is null or is zero ",longitude);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
BeanUtil.copyProperties(esGpsInfoVO,esGpsInfo,new CopyOptions());
|
|
||||||
esGpsInfo.setLat(latitude);
|
|
||||||
esGpsInfo.setLng(esGpsInfoVO.getLongitude());
|
|
||||||
esGpsInfo.setOrientation(esGpsInfoVO.getDirection());
|
|
||||||
esGpsInfo.setInfoSource(cityCode);
|
|
||||||
|
|
||||||
boolean offer = linkedBlockingDeque.offer(esGpsInfo);
|
|
||||||
R response = R.ok(offer);
|
|
||||||
if(200 == response.getCode()){
|
|
||||||
logger.info("topic={},data2es={}",topic,"success");
|
|
||||||
}else{
|
|
||||||
logger.error("topic={},data2es={}",topic,"fail");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,22 +1,16 @@
|
||||||
package org.dromara.kafka.consumer.handler;
|
package org.dromara.kafka.consumer.handler;
|
||||||
|
|
||||||
import org.apache.dubbo.config.annotation.DubboReference;
|
|
||||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||||
import org.apache.kafka.common.PartitionInfo;
|
import org.apache.kafka.common.PartitionInfo;
|
||||||
import org.apache.kafka.common.TopicPartition;
|
import org.apache.kafka.common.TopicPartition;
|
||||||
import org.dromara.data2es.api.RemoteDataToEsService;
|
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
|
|
||||||
import org.springframework.kafka.config.KafkaListenerContainerFactory;
|
|
||||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
|
||||||
import org.springframework.kafka.listener.ContainerProperties;
|
|
||||||
import org.springframework.kafka.listener.MessageListener;
|
|
||||||
|
|
||||||
import java.time.Duration;
|
import java.time.Duration;
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
|
import java.util.Collections;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
import java.util.concurrent.ThreadPoolExecutor;
|
import java.util.concurrent.ThreadPoolExecutor;
|
||||||
|
|
@ -29,68 +23,39 @@ import java.util.concurrent.ThreadPoolExecutor;
|
||||||
*/
|
*/
|
||||||
public class KafkaConsumerRunnable implements Runnable {
|
public class KafkaConsumerRunnable implements Runnable {
|
||||||
|
|
||||||
private Map props;
|
private final KafkaConsumer<String, Object> consumer;
|
||||||
private ThreadPoolExecutor taskExecutor;
|
private ThreadPoolExecutor taskExecutor;
|
||||||
|
|
||||||
private String cityCode;
|
private String cityCode;
|
||||||
private Logger logger = LoggerFactory.getLogger(KafkaConsumerRunnable.class);
|
private Logger logger = LoggerFactory.getLogger(KafkaConsumerRunnable.class);
|
||||||
|
|
||||||
public KafkaConsumerRunnable(Map props, ThreadPoolExecutor taskExecutor,
|
public KafkaConsumerRunnable(KafkaConsumer<String, Object> consumer, ThreadPoolExecutor taskExecutor) {
|
||||||
String cityCode) {
|
this.consumer = consumer;
|
||||||
this.props = props;
|
|
||||||
this.taskExecutor = taskExecutor;
|
this.taskExecutor = taskExecutor;
|
||||||
this.cityCode = cityCode;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private DefaultKafkaConsumerFactory buildConsumerFactory(){
|
|
||||||
return new DefaultKafkaConsumerFactory<String, String>(props);
|
|
||||||
}
|
|
||||||
|
|
||||||
private ContainerProperties containerProperties(String[] topic, MessageListener<String, Object> messageListener) {
|
|
||||||
ContainerProperties containerProperties = new ContainerProperties(topic);
|
|
||||||
containerProperties.setMessageListener(messageListener);
|
|
||||||
return containerProperties;
|
|
||||||
}
|
|
||||||
|
|
||||||
private KafkaListenerContainerFactory buildListenerFactory(){
|
|
||||||
ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory();
|
|
||||||
factory.setConsumerFactory(buildConsumerFactory());
|
|
||||||
factory.setConcurrency(4);
|
|
||||||
factory.setBatchListener(true);
|
|
||||||
|
|
||||||
factory.getContainerProperties().setPollTimeout(3000);
|
|
||||||
return factory;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void run() {
|
public void run() {
|
||||||
KafkaConsumer<String,Object> consumer = new KafkaConsumer<>(props);
|
consumer.subscribe(Collections.singletonList("jysb_dwxx"));
|
||||||
|
|
||||||
List topics = (List) props.get("topics");
|
|
||||||
consumer.subscribe(topics);
|
|
||||||
consumer.poll(0); // 令订阅生效
|
consumer.poll(0); // 令订阅生效
|
||||||
|
|
||||||
List<TopicPartition> topicPartitions = new ArrayList<>();
|
List<TopicPartition> topicPartitions = new ArrayList<>();
|
||||||
Map<String, List<PartitionInfo>> stringListMap = consumer.listTopics();
|
Map<String, List<PartitionInfo>> stringListMap = consumer.listTopics();
|
||||||
for (Object topic : topics) {
|
String topic1 ="jysb_dwxx";
|
||||||
String topic1 = (String) topic;
|
List<PartitionInfo> partitionInfos = stringListMap.get(topic1);
|
||||||
List<PartitionInfo> partitionInfos = stringListMap.get(topic1);
|
for (PartitionInfo partitionInfo : partitionInfos) {
|
||||||
for (PartitionInfo partitionInfo : partitionInfos) {
|
TopicPartition partition = new TopicPartition(topic1, partitionInfo.partition());
|
||||||
TopicPartition partition = new TopicPartition(topic1, partitionInfo.partition());
|
topicPartitions.add(partition);
|
||||||
topicPartitions.add(partition);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
consumer.seekToEnd(topicPartitions); // 如果传Collections.emptyList()表示移动所有订阅topic分区offset到最末端
|
consumer.seekToEnd(topicPartitions); // 如果传Collections.emptyList()表示移动所有订阅topic分区offset到最末端
|
||||||
|
|
||||||
while (true) {
|
while (true) {
|
||||||
ConsumerRecords<String, Object> records = consumer.poll(Duration.ofMillis(100));
|
ConsumerRecords<String, Object> records = consumer.poll(Duration.ofMillis(100));
|
||||||
for (ConsumerRecord<String, Object> record : records) {
|
for (ConsumerRecord<String, Object> record : records) {
|
||||||
taskExecutor.submit(new ConsumerWorker(record, cityCode));
|
logger.info("[Consumer], Received message: (" + record.key() + ", " + record.value()
|
||||||
|
+ ") at offset " + record.offset());
|
||||||
|
taskExecutor.submit(new ConsumerWorker(record));
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,9 +1,13 @@
|
||||||
package org.dromara.kafka.consumer.handler;
|
package org.dromara.kafka.consumer.handler;
|
||||||
|
|
||||||
|
|
||||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
import org.apache.kafka.clients.consumer.*;
|
||||||
|
import org.apache.kafka.common.KafkaException;
|
||||||
|
import org.apache.kafka.common.errors.AuthorizationException;
|
||||||
|
import org.apache.kafka.common.errors.RecordDeserializationException;
|
||||||
|
import org.apache.kafka.common.errors.UnsupportedVersionException;
|
||||||
import org.apache.kafka.common.serialization.StringDeserializer;
|
import org.apache.kafka.common.serialization.StringDeserializer;
|
||||||
import org.dromara.kafka.consumer.config.KafkaPropertiesConfig;
|
import org.dromara.kafka.consumer.config.KafkaProperties;
|
||||||
import org.dromara.kafka.consumer.config.LoginUtil;
|
import org.dromara.kafka.consumer.config.LoginUtil;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
@ -13,10 +17,11 @@ import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
|
||||||
import org.springframework.stereotype.Component;
|
import org.springframework.stereotype.Component;
|
||||||
import org.springframework.util.CollectionUtils;
|
import org.springframework.util.CollectionUtils;
|
||||||
|
|
||||||
|
import javax.annotation.Resource;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.HashMap;
|
import java.time.Duration;
|
||||||
import java.util.List;
|
import java.util.*;
|
||||||
import java.util.Map;
|
import java.util.concurrent.CountDownLatch;
|
||||||
import java.util.concurrent.ExecutorService;
|
import java.util.concurrent.ExecutorService;
|
||||||
import java.util.concurrent.Executors;
|
import java.util.concurrent.Executors;
|
||||||
import java.util.concurrent.ThreadPoolExecutor;
|
import java.util.concurrent.ThreadPoolExecutor;
|
||||||
|
|
@ -30,102 +35,156 @@ import java.util.concurrent.ThreadPoolExecutor;
|
||||||
@Component
|
@Component
|
||||||
public class RealConsumer implements CommandLineRunner {
|
public class RealConsumer implements CommandLineRunner {
|
||||||
|
|
||||||
private String kafkaServers;
|
private Logger logger = LoggerFactory.getLogger(RealConsumer.class);
|
||||||
|
|
||||||
private String groupId;
|
private final KafkaConsumer<String, Object> consumer;
|
||||||
|
|
||||||
private String topics;
|
@Resource
|
||||||
|
|
||||||
private String cityCode = "3400";
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@Autowired
|
|
||||||
KafkaPropertiesConfig kafkaPropertiesConfig;
|
|
||||||
|
|
||||||
@Autowired
|
|
||||||
ThreadPoolExecutor dtpExecutor2;
|
ThreadPoolExecutor dtpExecutor2;
|
||||||
|
|
||||||
|
|
||||||
private Logger logger = LoggerFactory.getLogger(RealConsumer.class);
|
private volatile boolean closed;
|
||||||
|
|
||||||
@Override
|
|
||||||
public void run(String... args) throws Exception {
|
|
||||||
kafkaServers = "127.0.0.1:9092";
|
|
||||||
topics = "topic.send.2,topic.send.3,topic.send.4,topic.send.5,topic.send.8";
|
|
||||||
groupId = "group_ruansi_xuancheng";
|
|
||||||
cityCode = "3418";
|
|
||||||
if(args.length > 0){
|
|
||||||
kafkaServers = args[0];
|
|
||||||
topics = args[1];
|
|
||||||
groupId = args[2];
|
|
||||||
cityCode = args[3];
|
|
||||||
|
|
||||||
}
|
|
||||||
ExecutorService executorService = Executors.newSingleThreadExecutor();
|
|
||||||
Map kafkaProp = getKafkaProp();
|
|
||||||
|
|
||||||
|
|
||||||
|
// 一次请求的最大等待时间(S)
|
||||||
|
private final int waitTime = 1;
|
||||||
|
|
||||||
|
// Broker连接地址
|
||||||
|
private final static String BOOTSTRAP_SERVER = "bootstrap.servers";
|
||||||
|
|
||||||
|
// Group id
|
||||||
|
private final static String GROUP_ID = "group.id";
|
||||||
|
|
||||||
|
// 消息内容使用的反序列化类
|
||||||
|
private final static String VALUE_DESERIALIZER = "value.deserializer";
|
||||||
|
|
||||||
|
// 消息Key值使用的反序列化类
|
||||||
|
private final static String KEY_DESERIALIZER = "key.deserializer";
|
||||||
|
|
||||||
|
// 协议类型:当前支持配置为SASL_PLAINTEXT或者PLAINTEXT
|
||||||
|
private final static String SECURITY_PROTOCOL = "security.protocol";
|
||||||
|
|
||||||
|
// 服务名
|
||||||
|
private final static String SASL_KERBEROS_SERVICE_NAME = "sasl.kerberos.service.name";
|
||||||
|
|
||||||
|
// 域名
|
||||||
|
private final static String KERBEROS_DOMAIN_NAME = "kerberos.domain.name";
|
||||||
|
|
||||||
|
// 是否自动提交offset
|
||||||
|
private final static String ENABLE_AUTO_COMMIT = "enable.auto.commit";
|
||||||
|
|
||||||
|
// 自动提交offset的时间间隔
|
||||||
|
private final static String AUTO_COMMIT_INTERVAL_MS = "auto.commit.interval.ms";
|
||||||
|
|
||||||
|
// 会话超时时间
|
||||||
|
private final static String SESSION_TIMEOUT_MS = "session.timeout.ms";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 用户自己申请的机机账号keytab文件名称
|
||||||
|
*/
|
||||||
|
private static final String USER_KEYTAB_FILE = "user.keytab";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 用户自己申请的机机账号名称
|
||||||
|
*/
|
||||||
|
private static final String USER_PRINCIPAL = "yhy_ahrs_rcw";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Consumer构造函数
|
||||||
|
*
|
||||||
|
* @param
|
||||||
|
*/
|
||||||
|
public RealConsumer() {
|
||||||
|
initSecurity();
|
||||||
|
Properties props = initProperties();
|
||||||
|
consumer = new KafkaConsumer<String, Object>(props);
|
||||||
|
// 订阅
|
||||||
|
// consumer.subscribe(Collections.singletonList("jysb_dwxx"));
|
||||||
|
}
|
||||||
|
|
||||||
|
public static Properties initProperties() {
|
||||||
|
Properties props = new Properties();
|
||||||
|
KafkaProperties kafkaProc = KafkaProperties.getInstance();
|
||||||
|
|
||||||
|
// Broker连接地址
|
||||||
|
props.put(BOOTSTRAP_SERVER, kafkaProc.getValues(BOOTSTRAP_SERVER, "localhost:21007"));
|
||||||
|
// Group id
|
||||||
|
props.put(GROUP_ID, kafkaProc.getValues(GROUP_ID, "DemoConsumer"));
|
||||||
|
// 是否自动提交offset
|
||||||
|
props.put(ENABLE_AUTO_COMMIT, kafkaProc.getValues(ENABLE_AUTO_COMMIT, "true"));
|
||||||
|
// 自动提交offset的时间间隔
|
||||||
|
props.put(AUTO_COMMIT_INTERVAL_MS, kafkaProc.getValues(AUTO_COMMIT_INTERVAL_MS,"1000"));
|
||||||
|
// 会话超时时间
|
||||||
|
props.put(SESSION_TIMEOUT_MS, kafkaProc.getValues(SESSION_TIMEOUT_MS, "30000"));
|
||||||
|
// 消息Key值使用的反序列化类
|
||||||
|
props.put(KEY_DESERIALIZER,
|
||||||
|
kafkaProc.getValues(KEY_DESERIALIZER, "org.apache.kafka.common.serialization.StringDeserializer"));
|
||||||
|
// 消息内容使用的反序列化类
|
||||||
|
props.put(VALUE_DESERIALIZER,
|
||||||
|
kafkaProc.getValues(VALUE_DESERIALIZER, "org.apache.kafka.common.serialization.StringDeserializer"));
|
||||||
|
// 安全协议类型
|
||||||
|
props.put(SECURITY_PROTOCOL, kafkaProc.getValues(SECURITY_PROTOCOL, "SASL_PLAINTEXT"));
|
||||||
|
// 服务名
|
||||||
|
props.put(SASL_KERBEROS_SERVICE_NAME, "kafka");
|
||||||
|
// 域名
|
||||||
|
props.put(KERBEROS_DOMAIN_NAME, kafkaProc.getValues(KERBEROS_DOMAIN_NAME, "hadoop.hadoop.com"));
|
||||||
|
|
||||||
|
return props;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 初始化安全认证
|
||||||
|
*/
|
||||||
|
public void initSecurity() {
|
||||||
if (LoginUtil.isSecurityModel())
|
if (LoginUtil.isSecurityModel())
|
||||||
{
|
{
|
||||||
try
|
try {
|
||||||
{
|
|
||||||
logger.info("Securitymode start.");
|
logger.info("Securitymode start.");
|
||||||
|
|
||||||
//!!注意,安全认证时,需要用户手动修改为自己申请的机机账号
|
// !!注意,安全认证时,需要用户手动修改为自己申请的机机账号
|
||||||
//认证方式 SASL_PLAINTEXT 或者 PLAINTEXT
|
LoginUtil.securityPrepare(USER_PRINCIPAL, USER_KEYTAB_FILE);
|
||||||
kafkaProp.put("security.protocol","SASL_PLAINTEXT");
|
} catch (IOException e) {
|
||||||
//服务名
|
|
||||||
kafkaProp.put("sasl.kerberos.service.name","kafka");
|
|
||||||
//域名
|
|
||||||
kafkaProp.put("kerberos.domain.name","hadoop.hadoop.com");
|
|
||||||
LoginUtil.setJaasFile("","");
|
|
||||||
}
|
|
||||||
catch (IOException e)
|
|
||||||
{
|
|
||||||
logger.error("Security prepare failure.");
|
logger.error("Security prepare failure.");
|
||||||
logger.error("The IOException occured.", e);
|
logger.error("The IOException occured.", e);
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
logger.info("Security prepare success.");
|
logger.info("Security prepare success.");
|
||||||
}
|
}
|
||||||
|
|
||||||
KafkaConsumerRunnable runnable = new KafkaConsumerRunnable(kafkaProp,dtpExecutor2,cityCode);
|
|
||||||
executorService.execute(runnable);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* 获取kafka配置
|
* 订阅Topic的消息处理函数
|
||||||
* @return
|
|
||||||
*/
|
*/
|
||||||
private Map<String, Object> getKafkaProp() {
|
public void run(String... args) throws Exception{
|
||||||
// Properties map = new Properties();
|
try {
|
||||||
Map<String, Object> map = new HashMap<>();
|
logger.info("进入消费");
|
||||||
map.put("bootstrap.servers",kafkaServers);
|
ExecutorService executorService = Executors.newSingleThreadExecutor();
|
||||||
map.put("group.id",groupId);
|
// realConsumer.run();
|
||||||
map.put("enable.auto.commit", "true");
|
KafkaConsumerRunnable runnable = new KafkaConsumerRunnable(consumer,dtpExecutor2);
|
||||||
map.put("auto.commit.interval.ms", "1000");
|
executorService.execute(runnable);
|
||||||
map.put("session.timeout.ms", "30000");
|
// 消息消费请求
|
||||||
map.put("key.deserializer", StringDeserializer.class);
|
/* ConsumerRecords<String, Object> records = consumer.poll(Duration.ofSeconds(waitTime));
|
||||||
map.put("value.deserializer", StringDeserializer.class);
|
// 消息处理
|
||||||
map.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG,5);
|
for (ConsumerRecord<String, Object> record : records) {
|
||||||
// map.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG,1000 * 5);
|
logger.info("[Consumer], Received message: (" + record.key() + ", " + record.value()
|
||||||
// map.put("ack.mode", "manual_immediate");
|
+ ") at offset " + record.offset());
|
||||||
|
dtpExecutor2.submit(new ConsumerWorker(record));
|
||||||
|
|
||||||
// //认证方式 SASL_PLAINTEXT 或者 PLAINTEXT
|
}*/
|
||||||
// map.put("security.protocol","SASL_PLAINTEXT");
|
} catch (AuthorizationException | UnsupportedVersionException
|
||||||
// //服务名
|
| RecordDeserializationException e) {
|
||||||
// map.put("sasl.kerberos.service.name","kafka");
|
logger.error(e.getMessage());
|
||||||
// //域名
|
// 无法从异常中恢复
|
||||||
// map.put("kerberos.domain.name","hadoop.hadoop.com");
|
} catch (OffsetOutOfRangeException | NoOffsetForPartitionException e) {
|
||||||
String[] split = topics.split(",");
|
logger.error("Invalid or no offset found, using latest");
|
||||||
List list = CollectionUtils.arrayToList(split);
|
consumer.seekToEnd(e.partitions());
|
||||||
map.put("topics", list);
|
consumer.commitSync();
|
||||||
return map;
|
} catch (KafkaException e) {
|
||||||
|
logger.error(e.getMessage());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -7,8 +7,6 @@ import org.springframework.context.annotation.Configuration;
|
||||||
import org.springframework.web.servlet.HandlerInterceptor;
|
import org.springframework.web.servlet.HandlerInterceptor;
|
||||||
import org.springframework.web.servlet.ModelAndView;
|
import org.springframework.web.servlet.ModelAndView;
|
||||||
|
|
||||||
import javax.servlet.http.HttpServletRequest;
|
|
||||||
import javax.servlet.http.HttpServletResponse;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* <p>description: </p>
|
* <p>description: </p>
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,168 @@
|
||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||||
|
xmlns="http://maven.apache.org/POM/4.0.0"
|
||||||
|
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||||
|
<parent>
|
||||||
|
<groupId>org.dromara</groupId>
|
||||||
|
<artifactId>stwzhj-modules</artifactId>
|
||||||
|
<version>${revision}</version>
|
||||||
|
</parent>
|
||||||
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
|
||||||
|
<artifactId>stwzhj-data2StKafka</artifactId>
|
||||||
|
|
||||||
|
<description>
|
||||||
|
stwzhj-data2StKafka 消费地市kafka发送到省厅kafka
|
||||||
|
</description>
|
||||||
|
|
||||||
|
<dependencies>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.dromara</groupId>
|
||||||
|
<artifactId>stwzhj-common-nacos</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.dromara</groupId>
|
||||||
|
<artifactId>stwzhj-common-sentinel</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<!-- RuoYi Common Log -->
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.dromara</groupId>
|
||||||
|
<artifactId>stwzhj-common-log</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.dromara</groupId>
|
||||||
|
<artifactId>stwzhj-common-dict</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.dromara</groupId>
|
||||||
|
<artifactId>stwzhj-common-doc</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.dromara</groupId>
|
||||||
|
<artifactId>stwzhj-common-web</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.dromara</groupId>
|
||||||
|
<artifactId>stwzhj-common-dubbo</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.dromara</groupId>
|
||||||
|
<artifactId>stwzhj-common-seata</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.dromara</groupId>
|
||||||
|
<artifactId>stwzhj-common-idempotent</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.dromara</groupId>
|
||||||
|
<artifactId>stwzhj-common-tenant</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.dromara</groupId>
|
||||||
|
<artifactId>stwzhj-common-security</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.dromara</groupId>
|
||||||
|
<artifactId>stwzhj-common-translation</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.dromara</groupId>
|
||||||
|
<artifactId>stwzhj-common-sensitive</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.dromara</groupId>
|
||||||
|
<artifactId>stwzhj-common-encrypt</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.dromara</groupId>
|
||||||
|
<artifactId>stwzhj-api-data2es</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<!--动态线程池-->
|
||||||
|
<dependency>
|
||||||
|
<groupId>cn.dynamictp</groupId>
|
||||||
|
<artifactId>dynamic-tp-spring-boot-starter-common</artifactId>
|
||||||
|
<version>1.1.0</version>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.kafka</groupId>
|
||||||
|
<artifactId>kafka_2.12</artifactId>
|
||||||
|
<version>3.6.1-h0.cbu.mrs.350.r11</version>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.zookeeper</groupId>
|
||||||
|
<artifactId>zookeeper</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>net.sf.jopt-simple</groupId>
|
||||||
|
<artifactId>jopt-simple</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.huawei.mrs</groupId>
|
||||||
|
<artifactId>manager-wc2frm</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.kafka</groupId>
|
||||||
|
<artifactId>kafka-clients</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.xerial.snappy</groupId>
|
||||||
|
<artifactId>snappy-java</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.huawei.mrs</groupId>
|
||||||
|
<artifactId>om-controller-api</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.101tec</groupId>
|
||||||
|
<artifactId>zkclient</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.kafka</groupId>
|
||||||
|
<artifactId>kafka-clients</artifactId>
|
||||||
|
<version>3.6.1-h0.cbu.mrs.350.r11</version>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
|
||||||
|
</dependencies>
|
||||||
|
|
||||||
|
<build>
|
||||||
|
<finalName>${project.artifactId}</finalName>
|
||||||
|
<plugins>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.springframework.boot</groupId>
|
||||||
|
<artifactId>spring-boot-maven-plugin</artifactId>
|
||||||
|
<version>${spring-boot.version}</version>
|
||||||
|
<executions>
|
||||||
|
<execution>
|
||||||
|
<goals>
|
||||||
|
<goal>repackage</goal>
|
||||||
|
</goals>
|
||||||
|
</execution>
|
||||||
|
</executions>
|
||||||
|
</plugin>
|
||||||
|
</plugins>
|
||||||
|
</build>
|
||||||
|
|
||||||
|
</project>
|
||||||
|
|
@ -0,0 +1,22 @@
|
||||||
|
package org.dromara.data2kafka;
|
||||||
|
|
||||||
|
|
||||||
|
import org.apache.dubbo.config.spring.context.annotation.EnableDubbo;
|
||||||
|
import org.springframework.boot.SpringApplication;
|
||||||
|
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||||
|
import org.springframework.boot.context.metrics.buffering.BufferingApplicationStartup;
|
||||||
|
import org.springframework.scheduling.annotation.EnableScheduling;
|
||||||
|
|
||||||
|
@EnableDubbo
|
||||||
|
@EnableScheduling
|
||||||
|
@SpringBootApplication
|
||||||
|
public class Data2KafkaApplication {
|
||||||
|
|
||||||
|
public static void main(String[] args) {
|
||||||
|
SpringApplication application = new SpringApplication(Data2KafkaApplication.class);
|
||||||
|
application.setApplicationStartup(new BufferingApplicationStartup(2048));
|
||||||
|
application.run(args);
|
||||||
|
System.out.println("(♥◠‿◠)ノ゙ 消费数据发送至省厅启动成功 ლ(´ڡ`ლ)゙ ");
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,69 @@
|
||||||
|
package org.dromara.data2kafka.config;
|
||||||
|
|
||||||
|
import com.dtp.common.em.QueueTypeEnum;
|
||||||
|
import com.dtp.common.em.RejectedTypeEnum;
|
||||||
|
import com.dtp.core.support.ThreadPoolBuilder;
|
||||||
|
import org.springframework.context.annotation.Bean;
|
||||||
|
import org.springframework.context.annotation.Configuration;
|
||||||
|
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
|
||||||
|
|
||||||
|
import java.util.concurrent.LinkedBlockingQueue;
|
||||||
|
import java.util.concurrent.ThreadPoolExecutor;
|
||||||
|
import java.util.concurrent.TimeUnit;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* <p>description: </p>
|
||||||
|
*
|
||||||
|
* @author chenle
|
||||||
|
* @date 2021-09-06 16:31
|
||||||
|
*/
|
||||||
|
@Configuration
|
||||||
|
public class AsyncConfig {
|
||||||
|
|
||||||
|
@Bean("taskExecutor")
|
||||||
|
public ThreadPoolTaskExecutor taskExecutor(){
|
||||||
|
ThreadPoolTaskExecutor taskExecutor = new ThreadPoolTaskExecutor();
|
||||||
|
taskExecutor.setCorePoolSize(8);
|
||||||
|
taskExecutor.setMaxPoolSize(20);
|
||||||
|
taskExecutor.setQueueCapacity(200);
|
||||||
|
taskExecutor.setKeepAliveSeconds(60);
|
||||||
|
taskExecutor.setThreadNamePrefix("hfapp--kafkaConsumer--");
|
||||||
|
taskExecutor.setWaitForTasksToCompleteOnShutdown(true);
|
||||||
|
taskExecutor.setAwaitTerminationSeconds(60);
|
||||||
|
taskExecutor.setRejectedExecutionHandler(new ThreadPoolExecutor.DiscardOldestPolicy());
|
||||||
|
return taskExecutor;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* tips: 建议直接在配置中心配置就行,不用 @Bean 声明
|
||||||
|
* @return 线程池实例
|
||||||
|
*/
|
||||||
|
// @Bean(name = "dtpExecutor2")
|
||||||
|
public ThreadPoolExecutor dtpExecutor2() {
|
||||||
|
return ThreadPoolBuilder.newBuilder()
|
||||||
|
.threadPoolName("dtpExecutor2")
|
||||||
|
.corePoolSize(8)
|
||||||
|
.maximumPoolSize(20)
|
||||||
|
.keepAliveTime(60)
|
||||||
|
.timeUnit(TimeUnit.MILLISECONDS)
|
||||||
|
.workQueue(QueueTypeEnum.VARIABLE_LINKED_BLOCKING_QUEUE.getName(), 1024, false)
|
||||||
|
.waitForTasksToCompleteOnShutdown(true)
|
||||||
|
.awaitTerminationSeconds(60)
|
||||||
|
.rejectedExecutionHandler(RejectedTypeEnum.CALLER_RUNS_POLICY.getName())
|
||||||
|
.buildDynamic();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Bean(name = "threadPoolExecutor")
|
||||||
|
public ThreadPoolExecutor threadPoolExecutor() {
|
||||||
|
return new ThreadPoolExecutor(
|
||||||
|
8, // 核心线程数
|
||||||
|
20, // 最大线程数
|
||||||
|
60, // 空闲时间300秒
|
||||||
|
TimeUnit.SECONDS,
|
||||||
|
new LinkedBlockingQueue<>(10000), // 任务队列最大长度
|
||||||
|
new ThreadPoolExecutor.CallerRunsPolicy() // 拒绝策略:由调用线程处理
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
@ -0,0 +1,138 @@
|
||||||
|
package org.dromara.data2kafka.config;
|
||||||
|
|
||||||
|
import java.io.File;
|
||||||
|
import java.io.FileInputStream;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.Properties;
|
||||||
|
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
public final class KafkaProperties
|
||||||
|
{
|
||||||
|
private static final Logger LOG = LoggerFactory.getLogger(KafkaProperties.class);
|
||||||
|
|
||||||
|
// Topic名称,安全模式下,需要以管理员用户添加当前用户的访问权限
|
||||||
|
public final static String TOPIC = "jysb_dwxx";
|
||||||
|
|
||||||
|
private static Properties serverProps = new Properties();
|
||||||
|
|
||||||
|
private static Properties producerProps = new Properties();
|
||||||
|
|
||||||
|
private static Properties consumerProps = new Properties();
|
||||||
|
|
||||||
|
private static Properties clientProps = new Properties();
|
||||||
|
|
||||||
|
private static KafkaProperties instance = null;
|
||||||
|
|
||||||
|
private KafkaProperties()
|
||||||
|
{
|
||||||
|
// String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
|
||||||
|
String filePath = "/home/rsoft/config/";
|
||||||
|
LOG.info("路径=={}",filePath);
|
||||||
|
try
|
||||||
|
{
|
||||||
|
File proFile = new File(filePath + "producer.properties");
|
||||||
|
|
||||||
|
if (proFile.exists())
|
||||||
|
{
|
||||||
|
producerProps.load(new FileInputStream(filePath + "producer.properties"));
|
||||||
|
}
|
||||||
|
|
||||||
|
File conFile = new File(filePath + "producer.properties");
|
||||||
|
|
||||||
|
if (conFile.exists())
|
||||||
|
{
|
||||||
|
consumerProps.load(new FileInputStream(filePath + "consumer.properties"));
|
||||||
|
}
|
||||||
|
|
||||||
|
File serFile = new File(filePath + "server.properties");
|
||||||
|
|
||||||
|
if (serFile.exists())
|
||||||
|
{
|
||||||
|
serverProps.load(new FileInputStream(filePath + "server.properties"));
|
||||||
|
}
|
||||||
|
|
||||||
|
File cliFile = new File(filePath + "client.properties");
|
||||||
|
|
||||||
|
if (cliFile.exists())
|
||||||
|
{
|
||||||
|
clientProps.load(new FileInputStream(filePath + "client.properties"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
catch (IOException e)
|
||||||
|
{
|
||||||
|
LOG.info("The Exception occured.", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public synchronized static KafkaProperties getInstance()
|
||||||
|
{
|
||||||
|
if (null == instance)
|
||||||
|
{
|
||||||
|
instance = new KafkaProperties();
|
||||||
|
}
|
||||||
|
|
||||||
|
return instance;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 获取参数值
|
||||||
|
* @param key properites的key值
|
||||||
|
* @param defValue 默认值
|
||||||
|
* @return
|
||||||
|
*/
|
||||||
|
public String getValues(String key, String defValue)
|
||||||
|
{
|
||||||
|
String rtValue = null;
|
||||||
|
|
||||||
|
if (null == key)
|
||||||
|
{
|
||||||
|
LOG.error("key is null");
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
rtValue = getPropertiesValue(key);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (null == rtValue)
|
||||||
|
{
|
||||||
|
LOG.warn("KafkaProperties.getValues return null, key is " + key);
|
||||||
|
rtValue = defValue;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG.info("KafkaProperties.getValues: key is " + key + "; Value is " + rtValue);
|
||||||
|
|
||||||
|
return rtValue;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 根据key值获取server.properties的值
|
||||||
|
* @param key
|
||||||
|
* @return
|
||||||
|
*/
|
||||||
|
private String getPropertiesValue(String key)
|
||||||
|
{
|
||||||
|
String rtValue = serverProps.getProperty(key);
|
||||||
|
|
||||||
|
// server.properties中没有,则再向producer.properties中获取
|
||||||
|
if (null == rtValue)
|
||||||
|
{
|
||||||
|
rtValue = producerProps.getProperty(key);
|
||||||
|
}
|
||||||
|
|
||||||
|
// producer中没有,则再向consumer.properties中获取
|
||||||
|
if (null == rtValue)
|
||||||
|
{
|
||||||
|
rtValue = consumerProps.getProperty(key);
|
||||||
|
}
|
||||||
|
|
||||||
|
// consumer没有,则再向client.properties中获取
|
||||||
|
if (null == rtValue)
|
||||||
|
{
|
||||||
|
rtValue = clientProps.getProperty(key);
|
||||||
|
}
|
||||||
|
|
||||||
|
return rtValue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,259 @@
|
||||||
|
package org.dromara.data2kafka.config;
|
||||||
|
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
import java.io.File;
|
||||||
|
import java.io.FileInputStream;
|
||||||
|
import java.io.FileWriter;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.Properties;
|
||||||
|
|
||||||
|
public class LoginUtil {
|
||||||
|
private static final Logger LOG = LoggerFactory.getLogger(LoginUtil.class);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* no JavaDoc
|
||||||
|
*/
|
||||||
|
public enum Module {
|
||||||
|
KAFKA("KafkaClient"), ZOOKEEPER("Client");
|
||||||
|
|
||||||
|
private String name;
|
||||||
|
|
||||||
|
private Module(String name)
|
||||||
|
{
|
||||||
|
this.name = name;
|
||||||
|
}
|
||||||
|
|
||||||
|
public String getName()
|
||||||
|
{
|
||||||
|
return name;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* line operator string
|
||||||
|
*/
|
||||||
|
private static final String LINE_SEPARATOR = System.getProperty("line.separator");
|
||||||
|
|
||||||
|
/**
|
||||||
|
* jaas file postfix
|
||||||
|
*/
|
||||||
|
private static final String JAAS_POSTFIX = ".jaas.conf";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* is IBM jdk or not
|
||||||
|
*/
|
||||||
|
private static final boolean IS_IBM_JDK = System.getProperty("java.vendor").contains("IBM");
|
||||||
|
|
||||||
|
/**
|
||||||
|
* IBM jdk login module
|
||||||
|
*/
|
||||||
|
private static final String IBM_LOGIN_MODULE = "com.ibm.security.auth.module.Krb5LoginModule required";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* oracle jdk login module
|
||||||
|
*/
|
||||||
|
private static final String SUN_LOGIN_MODULE = "com.sun.security.auth.module.Krb5LoginModule required";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Zookeeper quorum principal.
|
||||||
|
*/
|
||||||
|
public static final String ZOOKEEPER_AUTH_PRINCIPAL = "zookeeper.server.principal";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* java security krb5 file path
|
||||||
|
*/
|
||||||
|
public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* java security login file path
|
||||||
|
*/
|
||||||
|
public static final String JAVA_SECURITY_LOGIN_CONF = "java.security.auth.login.config";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 设置jaas.conf文件
|
||||||
|
*
|
||||||
|
* @param principal
|
||||||
|
* @param keytabPath
|
||||||
|
* @throws IOException
|
||||||
|
*/
|
||||||
|
public static void setJaasFile(String principal, String keytabPath)
|
||||||
|
throws IOException {
|
||||||
|
String jaasPath =
|
||||||
|
new File(System.getProperty("java.io.tmpdir")) + File.separator + System.getProperty("user.name")
|
||||||
|
+ JAAS_POSTFIX;
|
||||||
|
|
||||||
|
// windows路径下分隔符替换
|
||||||
|
jaasPath = jaasPath.replace("\\", "\\\\");
|
||||||
|
// 删除jaas文件
|
||||||
|
deleteJaasFile(jaasPath);
|
||||||
|
writeJaasFile(jaasPath, principal, keytabPath);
|
||||||
|
System.setProperty(JAVA_SECURITY_LOGIN_CONF, jaasPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 设置zookeeper服务端principal
|
||||||
|
*
|
||||||
|
* @param zkServerPrincipal
|
||||||
|
* @throws IOException
|
||||||
|
*/
|
||||||
|
public static void setZookeeperServerPrincipal(String zkServerPrincipal)
|
||||||
|
throws IOException {
|
||||||
|
System.setProperty(ZOOKEEPER_AUTH_PRINCIPAL, zkServerPrincipal);
|
||||||
|
String ret = System.getProperty(ZOOKEEPER_AUTH_PRINCIPAL);
|
||||||
|
if (ret == null)
|
||||||
|
{
|
||||||
|
throw new IOException(ZOOKEEPER_AUTH_PRINCIPAL + " is null.");
|
||||||
|
}
|
||||||
|
if (!ret.equals(zkServerPrincipal))
|
||||||
|
{
|
||||||
|
throw new IOException(ZOOKEEPER_AUTH_PRINCIPAL + " is " + ret + " is not " + zkServerPrincipal + ".");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 设置krb5文件
|
||||||
|
*
|
||||||
|
* @param krb5ConfFile
|
||||||
|
* @throws IOException
|
||||||
|
*/
|
||||||
|
public static void setKrb5Config(String krb5ConfFile)
|
||||||
|
throws IOException {
|
||||||
|
System.setProperty(JAVA_SECURITY_KRB5_CONF, krb5ConfFile);
|
||||||
|
String ret = System.getProperty(JAVA_SECURITY_KRB5_CONF);
|
||||||
|
if (ret == null)
|
||||||
|
{
|
||||||
|
throw new IOException(JAVA_SECURITY_KRB5_CONF + " is null.");
|
||||||
|
}
|
||||||
|
if (!ret.equals(krb5ConfFile))
|
||||||
|
{
|
||||||
|
throw new IOException(JAVA_SECURITY_KRB5_CONF + " is " + ret + " is not " + krb5ConfFile + ".");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 写入jaas文件
|
||||||
|
*
|
||||||
|
* @throws IOException
|
||||||
|
* 写文件异常
|
||||||
|
*/
|
||||||
|
private static void writeJaasFile(String jaasPath, String principal, String keytabPath)
|
||||||
|
throws IOException {
|
||||||
|
FileWriter writer = new FileWriter(new File(jaasPath));
|
||||||
|
try
|
||||||
|
{
|
||||||
|
writer.write(getJaasConfContext(principal, keytabPath));
|
||||||
|
writer.flush();
|
||||||
|
}
|
||||||
|
catch (IOException e)
|
||||||
|
{
|
||||||
|
throw new IOException("Failed to create jaas.conf File");
|
||||||
|
}
|
||||||
|
finally
|
||||||
|
{
|
||||||
|
writer.close();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void deleteJaasFile(String jaasPath)
|
||||||
|
throws IOException {
|
||||||
|
File jaasFile = new File(jaasPath);
|
||||||
|
if (jaasFile.exists())
|
||||||
|
{
|
||||||
|
if (!jaasFile.delete())
|
||||||
|
{
|
||||||
|
throw new IOException("Failed to delete exists jaas file.");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static String getJaasConfContext(String principal, String keytabPath) {
|
||||||
|
Module[] allModule = Module.values();
|
||||||
|
StringBuilder builder = new StringBuilder();
|
||||||
|
for (Module modlue : allModule)
|
||||||
|
{
|
||||||
|
builder.append(getModuleContext(principal, keytabPath, modlue));
|
||||||
|
}
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
|
|
||||||
|
private static String getModuleContext(String userPrincipal, String keyTabPath, Module module) {
|
||||||
|
StringBuilder builder = new StringBuilder();
|
||||||
|
if (IS_IBM_JDK) {
|
||||||
|
builder.append(module.getName()).append(" {").append(LINE_SEPARATOR);
|
||||||
|
builder.append(IBM_LOGIN_MODULE).append(LINE_SEPARATOR);
|
||||||
|
builder.append("credsType=both").append(LINE_SEPARATOR);
|
||||||
|
builder.append("principal=\"" + userPrincipal + "\"").append(LINE_SEPARATOR);
|
||||||
|
builder.append("useKeytab=\"" + keyTabPath + "\"").append(LINE_SEPARATOR);
|
||||||
|
builder.append("debug=true;").append(LINE_SEPARATOR);
|
||||||
|
builder.append("};").append(LINE_SEPARATOR);
|
||||||
|
} else {
|
||||||
|
builder.append(module.getName()).append(" {").append(LINE_SEPARATOR);
|
||||||
|
builder.append(SUN_LOGIN_MODULE).append(LINE_SEPARATOR);
|
||||||
|
builder.append("useKeyTab=true").append(LINE_SEPARATOR);
|
||||||
|
builder.append("keyTab=\"" + keyTabPath + "\"").append(LINE_SEPARATOR);
|
||||||
|
builder.append("principal=\"" + userPrincipal + "\"").append(LINE_SEPARATOR);
|
||||||
|
builder.append("useTicketCache=false").append(LINE_SEPARATOR);
|
||||||
|
builder.append("storeKey=true").append(LINE_SEPARATOR);
|
||||||
|
builder.append("debug=true;").append(LINE_SEPARATOR);
|
||||||
|
builder.append("};").append(LINE_SEPARATOR);
|
||||||
|
}
|
||||||
|
|
||||||
|
return builder.toString();
|
||||||
|
}
|
||||||
|
|
||||||
|
public static void securityPrepare(String principal, String keyTabFile) throws IOException {
|
||||||
|
// String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator;
|
||||||
|
String filePath = "/home/rsoft/config/";
|
||||||
|
String krbFile = filePath + "krb5.conf";
|
||||||
|
String userKeyTableFile = filePath + keyTabFile;
|
||||||
|
|
||||||
|
// windows路径下分隔符替换
|
||||||
|
userKeyTableFile = userKeyTableFile.replace("\\", "\\\\");
|
||||||
|
krbFile = krbFile.replace("\\", "\\\\");
|
||||||
|
|
||||||
|
LoginUtil.setKrb5Config(krbFile);
|
||||||
|
LoginUtil.setZookeeperServerPrincipal("zookeeper/hadoop.hadoop.com");
|
||||||
|
LoginUtil.setJaasFile(principal, userKeyTableFile);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check security mode
|
||||||
|
*
|
||||||
|
* @return boolean
|
||||||
|
*/
|
||||||
|
public static Boolean isSecurityModel() {
|
||||||
|
Boolean isSecurity = false;
|
||||||
|
// String krbFilePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main" + File.separator + "resources" + File.separator + "kafkaSecurityMode";
|
||||||
|
String krbFilePath = "/home/rsoft/config/kafkaSecurityMode";
|
||||||
|
Properties securityProps = new Properties();
|
||||||
|
|
||||||
|
// file does not exist.
|
||||||
|
if (!isFileExists(krbFilePath)) {
|
||||||
|
return isSecurity;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
securityProps.load(new FileInputStream(krbFilePath));
|
||||||
|
|
||||||
|
if ("yes".equalsIgnoreCase(securityProps.getProperty("kafka.client.security.mode")))
|
||||||
|
{
|
||||||
|
isSecurity = true;
|
||||||
|
}
|
||||||
|
} catch (Exception e) {
|
||||||
|
LOG.info("The Exception occured : {}.", e);
|
||||||
|
}
|
||||||
|
|
||||||
|
return isSecurity;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* 判断文件是否存在
|
||||||
|
*/
|
||||||
|
private static boolean isFileExists(String fileName) {
|
||||||
|
File file = new File(fileName);
|
||||||
|
|
||||||
|
return file.exists();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,220 @@
|
||||||
|
package org.dromara.data2kafka.consumer;
|
||||||
|
|
||||||
|
import cn.hutool.core.bean.BeanUtil;
|
||||||
|
import cn.hutool.core.bean.copier.CopyOptions;
|
||||||
|
import cn.hutool.core.convert.ConvertException;
|
||||||
|
import cn.hutool.core.date.DateTime;
|
||||||
|
import cn.hutool.core.date.DateUtil;
|
||||||
|
import cn.hutool.json.JSONObject;
|
||||||
|
import cn.hutool.json.JSONUtil;
|
||||||
|
import com.alibaba.fastjson.JSON;
|
||||||
|
import org.apache.commons.lang.StringUtils;
|
||||||
|
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||||
|
import org.dromara.data2es.api.domain.RemoteGpsInfo;
|
||||||
|
import org.dromara.data2kafka.domain.EsGpsInfo;
|
||||||
|
import org.dromara.data2kafka.domain.EsGpsInfoVO;
|
||||||
|
import org.dromara.data2kafka.producer.NewProducer;
|
||||||
|
import org.dromara.data2kafka.producer.Producer;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
|
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.Date;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Objects;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* <p>description: </p>
|
||||||
|
*
|
||||||
|
* @author chenle
|
||||||
|
* @date 2021-09-06 16:44
|
||||||
|
*/
|
||||||
|
public class ConsumerWorker implements Runnable {
|
||||||
|
private ConsumerRecord<String, Object> record;
|
||||||
|
|
||||||
|
|
||||||
|
private final Producer producer;
|
||||||
|
|
||||||
|
private Logger logger = LoggerFactory.getLogger(ConsumerWorker.class);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
private String cityCode ;
|
||||||
|
|
||||||
|
ConsumerWorker(ConsumerRecord<String, Object> record, String cityCode) {
|
||||||
|
this.producer = Producer.getInstance();
|
||||||
|
this.record = record;
|
||||||
|
this.cityCode = cityCode;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void run() {
|
||||||
|
//其他地市使用的方法,这里使用了一个巧妙的方法,我们开发的地市都是传4位,这种其他地市的cityCode传大于4位,然后截取
|
||||||
|
if(cityCode.length() > 4){
|
||||||
|
cityCode = cityCode.substring(0,4);
|
||||||
|
normalRequest();
|
||||||
|
}else {
|
||||||
|
//六安、安庆等地市的方法,这些地市都是我们自己公司开发的东西。
|
||||||
|
luanrequest();
|
||||||
|
// luanrequestBatch();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* 废弃方法
|
||||||
|
* */
|
||||||
|
private void luanrequestBatch() {
|
||||||
|
Object value = record.value();
|
||||||
|
String topic = record.topic();
|
||||||
|
List<EsGpsInfo> list = new ArrayList<>();
|
||||||
|
logger.info("offset={},topic={},value={}", record.offset(), topic,value);
|
||||||
|
List<JSONObject> jsonObjects = JSON.parseArray((String) value, JSONObject.class);
|
||||||
|
for (JSONObject jsonObject : jsonObjects) {
|
||||||
|
EsGpsInfo esGpsInfo;
|
||||||
|
/*try {
|
||||||
|
jsonObject = JSONUtil.parseObj(((String) value));
|
||||||
|
}catch (ConvertException e){
|
||||||
|
logger.info("jsonObject=null:error={}",e.getMessage());
|
||||||
|
return;
|
||||||
|
}*/
|
||||||
|
try {
|
||||||
|
esGpsInfo = JSONUtil.toBean(jsonObject, EsGpsInfo.class);
|
||||||
|
}catch (ConvertException e){
|
||||||
|
logger.info("EsGpsInfo=null:error={}",e.getMessage());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if(Objects.isNull(esGpsInfo)){
|
||||||
|
logger.info("esGpsInfo=null no error");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
String deviceCode = esGpsInfo.getDeviceCode();
|
||||||
|
if(StringUtils.isEmpty(deviceCode) || deviceCode.length() > 100){
|
||||||
|
logger.info("deviceCode:{} is null or is too long ",deviceCode);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
String latitude = esGpsInfo.getLat();
|
||||||
|
if(StringUtils.isEmpty(latitude) || "0.0".equals(latitude)){
|
||||||
|
logger.info("latitude:{} is null or is zero ",latitude);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
String longitude = esGpsInfo.getLng();
|
||||||
|
if(StringUtils.isEmpty(longitude) || "0.0".equals(longitude)){
|
||||||
|
logger.info("longitude:{} is null or is zero ",longitude);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
esGpsInfo.setInfoSource(cityCode);
|
||||||
|
|
||||||
|
esGpsInfo.setGpsTime(new Date(Long.valueOf(jsonObject.getStr("gpsTime"))));
|
||||||
|
list.add(esGpsInfo);
|
||||||
|
}
|
||||||
|
// dataToEsService.saveGpsInfoBatch(list);
|
||||||
|
}
|
||||||
|
|
||||||
|
private void luanrequest() {
|
||||||
|
Object value = record.value();
|
||||||
|
String topic = record.topic();
|
||||||
|
|
||||||
|
// logger.info("offset={},topic={},value={}", record.offset(), topic,value);
|
||||||
|
RemoteGpsInfo esGpsInfo;
|
||||||
|
JSONObject jsonObject;
|
||||||
|
try {
|
||||||
|
jsonObject = JSONUtil.parseObj(((String) value));
|
||||||
|
}catch (ConvertException e){
|
||||||
|
logger.info("jsonObject=null:error={}",e.getMessage());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
esGpsInfo = JSONUtil.toBean(jsonObject, RemoteGpsInfo.class);
|
||||||
|
}catch (ConvertException e){
|
||||||
|
logger.info("EsGpsInfo=null:error={}",e.getMessage());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if(Objects.isNull(esGpsInfo)){
|
||||||
|
logger.info("esGpsInfo=null no error");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
String deviceCode = esGpsInfo.getDeviceCode();
|
||||||
|
if(StringUtils.isEmpty(deviceCode) || deviceCode.length() > 100){
|
||||||
|
logger.info("deviceCode:{} is null or is too long ",deviceCode);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
String latitude = esGpsInfo.getLat();
|
||||||
|
if(StringUtils.isEmpty(latitude) || "0.0".equals(latitude)){
|
||||||
|
logger.info("latitude:{} is null or is zero ",latitude);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
String longitude = esGpsInfo.getLng();
|
||||||
|
if(StringUtils.isEmpty(longitude) || "0.0".equals(longitude)){
|
||||||
|
logger.info("longitude:{} is null or is zero ",longitude);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
esGpsInfo.setInfoSource(cityCode);
|
||||||
|
try {
|
||||||
|
esGpsInfo.setGpsTime(new Date(Long.valueOf(jsonObject.getStr("gpsTime"))));
|
||||||
|
}catch (Exception e){
|
||||||
|
logger.error("error_msg={}",e.getMessage());
|
||||||
|
}
|
||||||
|
producer.sendMessage("jysb_dwxx",JSONUtil.toJsonStr(esGpsInfo));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 通用的请求(一般地市采用这个方法)
|
||||||
|
*/
|
||||||
|
private void normalRequest() {
|
||||||
|
Object value = record.value();
|
||||||
|
String topic = record.topic();
|
||||||
|
|
||||||
|
logger.info("offset={},topic={},value={}", record.offset(), topic,value);
|
||||||
|
|
||||||
|
EsGpsInfo gpsInfo = new EsGpsInfo();
|
||||||
|
EsGpsInfoVO esGpsInfoVO;
|
||||||
|
try {
|
||||||
|
esGpsInfoVO = JSONUtil.toBean(((String) value), EsGpsInfoVO.class);
|
||||||
|
}catch (ConvertException e){
|
||||||
|
logger.info("esGpsInfoVO=null:error={}",e.getMessage());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if(Objects.isNull(esGpsInfoVO)){
|
||||||
|
logger.info("esGpsInfoVO=null no error");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
try {
|
||||||
|
DateTime parse = DateUtil.parse(esGpsInfoVO.getGpsTime(), "yyyy-MM-dd HH:mm:ss");
|
||||||
|
}catch (Exception e){
|
||||||
|
logger.info("gpsTime:{} format error", esGpsInfoVO.getGpsTime());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
String deviceCode = esGpsInfoVO.getDeviceCode();
|
||||||
|
if(StringUtils.isEmpty(deviceCode) || deviceCode.length() > 100){
|
||||||
|
logger.info("deviceCode:{} is null or is too long ",deviceCode);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
String latitude = esGpsInfoVO.getLatitude();
|
||||||
|
if(StringUtils.isEmpty(latitude) || "0.0".equals(latitude)){
|
||||||
|
logger.info("latitude:{} is null or is zero ",latitude);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
String longitude = esGpsInfoVO.getLongitude();
|
||||||
|
if(StringUtils.isEmpty(longitude) || "0.0".equals(longitude)){
|
||||||
|
logger.info("longitude:{} is null or is zero ",longitude);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
BeanUtil.copyProperties(esGpsInfoVO,gpsInfo,new CopyOptions());
|
||||||
|
gpsInfo.setLat(latitude);
|
||||||
|
gpsInfo.setLng(esGpsInfoVO.getLongitude());
|
||||||
|
gpsInfo.setOrientation(esGpsInfoVO.getDirection());
|
||||||
|
gpsInfo.setInfoSource(cityCode);
|
||||||
|
producer.sendMessage("jysb_dwxx",JSONUtil.toJsonStr(gpsInfo));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,67 @@
|
||||||
|
package org.dromara.data2kafka.consumer;
|
||||||
|
|
||||||
|
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||||
|
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||||
|
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||||
|
import org.apache.kafka.common.PartitionInfo;
|
||||||
|
import org.apache.kafka.common.TopicPartition;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
import java.time.Duration;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.concurrent.ThreadPoolExecutor;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* <p>description: </p>
|
||||||
|
*
|
||||||
|
* @author chenle
|
||||||
|
* @date 2021-09-06 16:39
|
||||||
|
*/
|
||||||
|
public class KafkaConsumerRunnable implements Runnable {
|
||||||
|
|
||||||
|
private Map props;
|
||||||
|
private ThreadPoolExecutor taskExecutor;
|
||||||
|
|
||||||
|
private String cityCode;
|
||||||
|
private Logger logger = LoggerFactory.getLogger(KafkaConsumerRunnable.class);
|
||||||
|
|
||||||
|
public KafkaConsumerRunnable(Map props, ThreadPoolExecutor taskExecutor,
|
||||||
|
String cityCode) {
|
||||||
|
this.props = props;
|
||||||
|
this.taskExecutor = taskExecutor;
|
||||||
|
this.cityCode = cityCode;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void run() {
|
||||||
|
KafkaConsumer<String,Object> consumer = new KafkaConsumer<>(props);
|
||||||
|
|
||||||
|
List topics = (List) props.get("topics");
|
||||||
|
consumer.subscribe(topics);
|
||||||
|
consumer.poll(0); // 令订阅生效
|
||||||
|
|
||||||
|
List<TopicPartition> topicPartitions = new ArrayList<>();
|
||||||
|
Map<String, List<PartitionInfo>> stringListMap = consumer.listTopics();
|
||||||
|
for (Object topic : topics) {
|
||||||
|
String topic1 = (String) topic;
|
||||||
|
List<PartitionInfo> partitionInfos = stringListMap.get(topic1);
|
||||||
|
for (PartitionInfo partitionInfo : partitionInfos) {
|
||||||
|
TopicPartition partition = new TopicPartition(topic1, partitionInfo.partition());
|
||||||
|
topicPartitions.add(partition);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
consumer.seekToEnd(topicPartitions); // 如果传Collections.emptyList()表示移动所有订阅topic分区offset到最末端
|
||||||
|
|
||||||
|
while (true) {
|
||||||
|
ConsumerRecords<String, Object> records = consumer.poll(Duration.ofMillis(100));
|
||||||
|
for (ConsumerRecord<String, Object> record : records) {
|
||||||
|
taskExecutor.submit(new ConsumerWorker(record, cityCode));
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,144 @@
|
||||||
|
package org.dromara.data2kafka.consumer;
|
||||||
|
|
||||||
|
|
||||||
|
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||||
|
import org.apache.kafka.common.serialization.StringDeserializer;
|
||||||
|
import org.dromara.data2kafka.config.LoginUtil;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
|
import org.springframework.boot.CommandLineRunner;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
import org.springframework.util.CollectionUtils;
|
||||||
|
|
||||||
|
import javax.annotation.Resource;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.net.ConnectException;
|
||||||
|
import java.net.InetSocketAddress;
|
||||||
|
import java.net.Socket;
|
||||||
|
import java.net.UnknownHostException;
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.concurrent.ExecutorService;
|
||||||
|
import java.util.concurrent.Executors;
|
||||||
|
import java.util.concurrent.ThreadPoolExecutor;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* <p>description: </p>
|
||||||
|
*
|
||||||
|
* @author chenle
|
||||||
|
* @date 2021-09-06 11:15
|
||||||
|
*/
|
||||||
|
@Component
|
||||||
|
public class RealConsumer implements CommandLineRunner {
|
||||||
|
|
||||||
|
private String kafkaServers;
|
||||||
|
|
||||||
|
private String groupId;
|
||||||
|
|
||||||
|
private String topics;
|
||||||
|
|
||||||
|
private String cityCode = "3400";
|
||||||
|
|
||||||
|
@Autowired
|
||||||
|
ThreadPoolExecutor dtpExecutor2;
|
||||||
|
|
||||||
|
|
||||||
|
private Logger logger = LoggerFactory.getLogger(RealConsumer.class);
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void run(String... args) throws Exception {
|
||||||
|
kafkaServers = "127.0.0.1:9092";
|
||||||
|
topics = "topic.send.2,topic.send.3,topic.send.4,topic.send.5,topic.send.8";
|
||||||
|
groupId = "group_ruansi_xuancheng";
|
||||||
|
cityCode = "3418";
|
||||||
|
if(args.length > 0){
|
||||||
|
kafkaServers = args[0];
|
||||||
|
topics = args[1];
|
||||||
|
groupId = args[2];
|
||||||
|
cityCode = args[3];
|
||||||
|
|
||||||
|
}
|
||||||
|
ExecutorService executorService = Executors.newSingleThreadExecutor();
|
||||||
|
Map kafkaProp = getKafkaProp();
|
||||||
|
|
||||||
|
checkNetworkConnection("53.1.213.25",21007);
|
||||||
|
if (false)
|
||||||
|
{
|
||||||
|
try
|
||||||
|
{
|
||||||
|
logger.info("Securitymode start.");
|
||||||
|
|
||||||
|
//!!注意,安全认证时,需要用户手动修改为自己申请的机机账号
|
||||||
|
//认证方式 SASL_PLAINTEXT 或者 PLAINTEXT
|
||||||
|
kafkaProp.put("security.protocol","SASL_PLAINTEXT");
|
||||||
|
//服务名
|
||||||
|
kafkaProp.put("sasl.kerberos.service.name","kafka");
|
||||||
|
//域名
|
||||||
|
kafkaProp.put("kerberos.domain.name","hadoop.hadoop.com");
|
||||||
|
LoginUtil.setJaasFile("","");
|
||||||
|
}
|
||||||
|
catch (IOException e)
|
||||||
|
{
|
||||||
|
logger.error("Security prepare failure.");
|
||||||
|
logger.error("The IOException occured.", e);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
logger.info("Security prepare success.");
|
||||||
|
}
|
||||||
|
|
||||||
|
KafkaConsumerRunnable runnable = new KafkaConsumerRunnable(kafkaProp,dtpExecutor2,cityCode);
|
||||||
|
executorService.execute(runnable);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
private void checkNetworkConnection(String host, int port) {
|
||||||
|
try (Socket socket = new Socket()) {
|
||||||
|
socket.connect(new InetSocketAddress(host, port), 3000);
|
||||||
|
logger.info("✅ 网络连接正常: {}:{}", host, port);
|
||||||
|
} catch (IOException e) {
|
||||||
|
logger.error("🚨 无法连接到 {}:{} - {}", host, port, e.getMessage());
|
||||||
|
// 详细错误分析
|
||||||
|
if (e instanceof ConnectException) {
|
||||||
|
logger.error("请检查: 1. Kafka服务状态 2. 防火墙设置 3. 端口是否正确");
|
||||||
|
} else if (e instanceof UnknownHostException) {
|
||||||
|
logger.error("主机名解析失败,请检查DNS或hosts文件");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 获取kafka配置
|
||||||
|
* @return
|
||||||
|
*/
|
||||||
|
private Map<String, Object> getKafkaProp() {
|
||||||
|
// Properties map = new Properties();
|
||||||
|
Map<String, Object> map = new HashMap<>();
|
||||||
|
map.put("bootstrap.servers",kafkaServers);
|
||||||
|
map.put("group.id",groupId);
|
||||||
|
map.put("enable.auto.commit", "true");
|
||||||
|
map.put("auto.commit.interval.ms", "1000");
|
||||||
|
map.put("session.timeout.ms", "30000");
|
||||||
|
map.put("key.deserializer", StringDeserializer.class);
|
||||||
|
map.put("value.deserializer", StringDeserializer.class);
|
||||||
|
map.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG,5);
|
||||||
|
// map.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG,1000 * 5);
|
||||||
|
// map.put("ack.mode", "manual_immediate");
|
||||||
|
|
||||||
|
// //认证方式 SASL_PLAINTEXT 或者 PLAINTEXT
|
||||||
|
// map.put("security.protocol","SASL_PLAINTEXT");
|
||||||
|
// //服务名
|
||||||
|
// map.put("sasl.kerberos.service.name","kafka");
|
||||||
|
// //域名
|
||||||
|
// map.put("kerberos.domain.name","hadoop.hadoop.com");
|
||||||
|
String[] split = topics.split(",");
|
||||||
|
List list = CollectionUtils.arrayToList(split);
|
||||||
|
map.put("topics", list);
|
||||||
|
return map;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,52 @@
|
||||||
|
package org.dromara.data2kafka.domain;
|
||||||
|
|
||||||
|
import com.fasterxml.jackson.annotation.JsonFormat;
|
||||||
|
import lombok.Data;
|
||||||
|
|
||||||
|
import java.io.Serializable;
|
||||||
|
import java.util.Date;
|
||||||
|
|
||||||
|
/**
 * <p>description: </p>
 * GPS location record as written to the ES-bound topic/index.
 * @author chenle
 * @date 2021-05-14 9:39
 */
@Data
public class EsGpsInfo implements Serializable {

    private static final long serialVersionUID = 7455495841680488351L;
    /**
     * Unique code from the external system. The Hefei build does not need
     * the 21-digit id; when uploading to the provincial department the Kafka
     * sender side must generate the 21-digit id the province requires.
     */
    private String deviceCode;
    /**
     * Device type.
     */
    private String deviceType;
    // Latitude / longitude as strings (source systems send them as text).
    private String lat;
    private String lng;
    // heading / direction of travel
    private String orientation;
    // elevation
    private String height;
    // accuracy (原注释: 精度)
    private String deltaH;
    private String speed;

    // presumably 组织机构代码 (organization code) — TODO confirm
    private String zzjgdm;
    // presumably 组织机构名称 (organization name) — TODO confirm
    private String zzjgmc;
    private String policeNo;
    private String policeName;
    private String phoneNum;
    private String carNum;

    private Integer online;

    // Serialized as "yyyy-MM-dd HH:mm:ss" in GMT+8 by Jackson.
    @JsonFormat(pattern="yyyy-MM-dd HH:mm:ss",timezone="GMT+8")
    private Date gpsTime;
    // City code of the data source, e.g. 3401, 3402.
    private String infoSource;

}
|
||||||
|
|
@ -0,0 +1,41 @@
|
||||||
|
package org.dromara.data2kafka.domain;
|
||||||
|
|
||||||
|
import lombok.Data;
|
||||||
|
|
||||||
|
import java.io.Serializable;
|
||||||
|
|
||||||
|
/**
 * <p>description: </p>
 * Incoming GPS payload shape (VO) consumed from Kafka in the generic-city
 * pipeline; mapped onto EsGpsInfo in ConsumerWorker before republishing.
 * @author chenle
 * @date 2022-04-16 14:59
 */
@Data
public class EsGpsInfoVO implements Serializable {
    /**
     * Device serial number — unique per device.
     */
    private String deviceCode;
    // Latitude / longitude as strings (validated against null/"0.0" downstream).
    private String latitude;
    private String longitude;
    // heading / direction of travel
    private String direction;
    // elevation
    private String height;
    // NOTE(review): original comment said 精度 (accuracy) but the field is
    // named speed — looks like a copy-paste comment; confirm semantics.
    private String speed;

    // Formatted "yyyy-MM-dd HH:mm:ss" string (parsed for validation downstream).
    private String gpsTime;

    // presumably 组织机构代码 (organization code) — TODO confirm
    private String zzjgdm;

    // presumably 组织机构名称 (organization name) — TODO confirm
    private String zzjgmc;

    private String policeNo;

    private String policeName;

    private String carNum;

    private Integer online;
}
|
||||||
|
|
@ -0,0 +1,66 @@
|
||||||
|
package org.dromara.data2kafka.producer;
|
||||||
|
|
||||||
|
import com.alibaba.fastjson.JSONObject;
|
||||||
|
import org.apache.kafka.clients.producer.KafkaProducer;
|
||||||
|
import org.apache.kafka.clients.producer.ProducerRecord;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import javax.annotation.Resource;
|
||||||
|
import java.util.concurrent.ExecutionException;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* <p>description: </p>
|
||||||
|
*
|
||||||
|
* @author chenle
|
||||||
|
* @date 2021-11-01 17:20
|
||||||
|
*/
|
||||||
|
//@Component
|
||||||
|
public class NewProducer {
|
||||||
|
|
||||||
|
@Autowired
|
||||||
|
@Resource(name = "myKafkaProducer")
|
||||||
|
KafkaProducer kafkaProducer;
|
||||||
|
|
||||||
|
private Logger LOG = LoggerFactory.getLogger(NewProducer.class);
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 生产者线程执行函数,循环发送消息。
|
||||||
|
*/
|
||||||
|
public void send(Object obj,String topic) {
|
||||||
|
String obj2String = JSONObject.toJSONString(obj);
|
||||||
|
|
||||||
|
// 构造消息记录
|
||||||
|
ProducerRecord<String, String> record = new ProducerRecord<String, String>(topic, obj2String);
|
||||||
|
try {
|
||||||
|
// 同步发送
|
||||||
|
Object o = kafkaProducer.send(record).get();
|
||||||
|
LOG.info("同步发送成功: Object={}", JSONObject.toJSONString(o));
|
||||||
|
} catch (InterruptedException ie) {
|
||||||
|
ie.printStackTrace();
|
||||||
|
LOG.error("The InterruptedException occured : {}.", ie);
|
||||||
|
} catch (ExecutionException ee) {
|
||||||
|
ee.printStackTrace();
|
||||||
|
LOG.error("The ExecutionException occured : {}.", ee);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*kafkaProducer.send(record, (recordMetadata, e) -> {
|
||||||
|
if (e != null) {
|
||||||
|
LOG.error("send--The Exception occured.", e);
|
||||||
|
}
|
||||||
|
if (recordMetadata != null)
|
||||||
|
{
|
||||||
|
LOG.info("sent to partition(" + recordMetadata.partition() + "), "
|
||||||
|
+ "offset(" + recordMetadata.offset()+"),topic="+recordMetadata.topic());
|
||||||
|
}
|
||||||
|
});*/
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,215 @@
|
||||||
|
package org.dromara.data2kafka.producer;
|
||||||
|
|
||||||
|
import com.alibaba.fastjson.JSONObject;
|
||||||
|
import org.apache.kafka.clients.producer.Callback;
|
||||||
|
import org.apache.kafka.clients.producer.KafkaProducer;
|
||||||
|
import org.apache.kafka.clients.producer.ProducerRecord;
|
||||||
|
import org.apache.kafka.clients.producer.RecordMetadata;
|
||||||
|
import org.dromara.data2kafka.config.KafkaProperties;
|
||||||
|
import org.dromara.data2kafka.config.LoginUtil;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
import org.springframework.context.annotation.Bean;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.Properties;
|
||||||
|
import java.util.concurrent.ExecutionException;
|
||||||
|
import java.util.concurrent.Future;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* <p>description: </p>
|
||||||
|
*
|
||||||
|
* @author chenle
|
||||||
|
* @date 2021-11-03 14:15
|
||||||
|
*/
|
||||||
|
@Component
|
||||||
|
public class Producer {
|
||||||
|
|
||||||
|
private static final Logger logger = LoggerFactory.getLogger(Producer.class);
|
||||||
|
|
||||||
|
private final KafkaProducer<String, String> producer;
|
||||||
|
|
||||||
|
// 私有静态实例(volatile 保证可见性和有序性)
|
||||||
|
private static volatile Producer instance;
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
private final Boolean isAsync = true;
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
// Broker地址列表
|
||||||
|
private final static String BOOTSTRAP_SERVER = "bootstrap.servers";
|
||||||
|
|
||||||
|
// 客户端ID
|
||||||
|
private final static String CLIENT_ID = "client.id";
|
||||||
|
|
||||||
|
// Key序列化类
|
||||||
|
private final static String KEY_SERIALIZER = "key.serializer";
|
||||||
|
|
||||||
|
// Value序列化类
|
||||||
|
private final static String VALUE_SERIALIZER = "value.serializer";
|
||||||
|
|
||||||
|
// 协议类型:当前支持配置为SASL_PLAINTEXT或者PLAINTEXT
|
||||||
|
private final static String SECURITY_PROTOCOL = "security.protocol";
|
||||||
|
|
||||||
|
// 服务名
|
||||||
|
private final static String SASL_KERBEROS_SERVICE_NAME = "sasl.kerberos.service.name";
|
||||||
|
|
||||||
|
// 域名
|
||||||
|
private final static String KERBEROS_DOMAIN_NAME = "kerberos.domain.name";
|
||||||
|
|
||||||
|
// 分区类名
|
||||||
|
private final static String PARTITIONER_NAME = "partitioner.class";
|
||||||
|
|
||||||
|
// 默认发送100条消息
|
||||||
|
private final static int MESSAGE_NUM = 100;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 用户自己申请的机机账号keytab文件名称
|
||||||
|
*/
|
||||||
|
private static final String USER_KEYTAB_FILE = "user.keytab";
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 用户自己申请的机机账号名称
|
||||||
|
*/
|
||||||
|
private static final String USER_PRINCIPAL = "yhy_ahrs_rcw";
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Producer constructor
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
public Producer() {
|
||||||
|
initSecurity();
|
||||||
|
Properties props = initProperties();
|
||||||
|
this.producer = new KafkaProducer<>(props);
|
||||||
|
}
|
||||||
|
|
||||||
|
// 获取单例实例的公共方法(双重校验锁)
|
||||||
|
public static Producer getInstance() {
|
||||||
|
if (instance == null) {
|
||||||
|
synchronized (Producer.class) {
|
||||||
|
if (instance == null) {
|
||||||
|
instance = new Producer();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return instance;
|
||||||
|
}
|
||||||
|
|
||||||
|
// 添加 ShutdownHook 确保资源释放(推荐)
|
||||||
|
static {
|
||||||
|
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
|
||||||
|
if (instance != null && instance.producer != null) {
|
||||||
|
instance.producer.close();
|
||||||
|
}
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 初始化安全认证
|
||||||
|
*/
|
||||||
|
public void initSecurity() {
|
||||||
|
if (LoginUtil.isSecurityModel())
|
||||||
|
{
|
||||||
|
try {
|
||||||
|
logger.info("Securitymode start.");
|
||||||
|
|
||||||
|
// !!注意,安全认证时,需要用户手动修改为自己申请的机机账号
|
||||||
|
LoginUtil.securityPrepare(USER_PRINCIPAL, USER_KEYTAB_FILE);
|
||||||
|
} catch (IOException e) {
|
||||||
|
logger.error("Security prepare failure.");
|
||||||
|
logger.error("The IOException occured.", e);
|
||||||
|
}
|
||||||
|
logger.info("Security prepare success.");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public static Properties initProperties() {
|
||||||
|
Properties props = new Properties();
|
||||||
|
KafkaProperties kafkaProc = KafkaProperties.getInstance();
|
||||||
|
|
||||||
|
// Broker地址列表
|
||||||
|
props.put(BOOTSTRAP_SERVER, kafkaProc.getValues(BOOTSTRAP_SERVER, "localhost:21007"));
|
||||||
|
// 客户端ID
|
||||||
|
props.put(CLIENT_ID, kafkaProc.getValues(CLIENT_ID, "DemoProducer"));
|
||||||
|
// Key序列化类
|
||||||
|
props.put(KEY_SERIALIZER,
|
||||||
|
kafkaProc.getValues(KEY_SERIALIZER, "org.apache.kafka.common.serialization.StringSerializer"));
|
||||||
|
// Value序列化类
|
||||||
|
props.put(VALUE_SERIALIZER,
|
||||||
|
kafkaProc.getValues(VALUE_SERIALIZER, "org.apache.kafka.common.serialization.StringSerializer"));
|
||||||
|
// 协议类型:当前支持配置为SASL_PLAINTEXT或者PLAINTEXT
|
||||||
|
props.put(SECURITY_PROTOCOL, kafkaProc.getValues(SECURITY_PROTOCOL, "SASL_PLAINTEXT"));
|
||||||
|
// 服务名
|
||||||
|
props.put(SASL_KERBEROS_SERVICE_NAME, "kafka");
|
||||||
|
// 域名
|
||||||
|
props.put(KERBEROS_DOMAIN_NAME, kafkaProc.getValues(KERBEROS_DOMAIN_NAME, "hadoop.hadoop.com"));
|
||||||
|
// 分区类名
|
||||||
|
// props.put(PARTITIONER_NAME, kafkaProc.getValues(PARTITIONER_NAME, "com.huawei.bigdata.kafka.example.SimplePartitioner"));
|
||||||
|
return props;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 发送消息(核心方法)
|
||||||
|
*
|
||||||
|
* @param topic
|
||||||
|
* @param message 消息内容
|
||||||
|
* @return 同步发送时返回 RecordMetadata,异步发送返回 null
|
||||||
|
*/
|
||||||
|
public RecordMetadata sendMessage(String topic, String message) {
|
||||||
|
try {
|
||||||
|
logger.info("调用发送:topic={}, Object={}",topic,message );
|
||||||
|
long startTime = System.currentTimeMillis();
|
||||||
|
ProducerRecord<String, String> record = new ProducerRecord<>(topic, message);
|
||||||
|
if (isAsync) {
|
||||||
|
// 异步发送
|
||||||
|
producer.send(record, new DemoCallBack(startTime,topic, message));
|
||||||
|
return null;
|
||||||
|
} else {
|
||||||
|
Future<RecordMetadata> future = producer.send(record);
|
||||||
|
logger.info("同步发送成功: Object={}", future.get().topic());
|
||||||
|
return future.get();
|
||||||
|
|
||||||
|
}
|
||||||
|
}catch (Exception e){
|
||||||
|
e.printStackTrace();
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* 内部回调类
|
||||||
|
*/
|
||||||
|
private static class DemoCallBack implements Callback {
|
||||||
|
private final Logger logger = LoggerFactory.getLogger(DemoCallBack.class);
|
||||||
|
private final long startTime;
|
||||||
|
|
||||||
|
private final String topic;
|
||||||
|
private final String message;
|
||||||
|
|
||||||
|
public DemoCallBack(long startTime, String topic, String message) {
|
||||||
|
this.startTime = startTime;
|
||||||
|
this.topic = topic;
|
||||||
|
this.message = message;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void onCompletion(RecordMetadata metadata, Exception exception) {
|
||||||
|
long elapsedTime = System.currentTimeMillis() - startTime;
|
||||||
|
if (metadata != null) {
|
||||||
|
logger.info("topic=({}, {}) sent to partition({}), offset({}) in {} ms",
|
||||||
|
topic, message, metadata.partition(), metadata.offset(), elapsedTime);
|
||||||
|
} else if (exception != null) {
|
||||||
|
logger.error("Message sending failed", exception);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,34 @@
|
||||||
|
# Tomcat
|
||||||
|
server:
|
||||||
|
port: 9212
|
||||||
|
|
||||||
|
# Spring
|
||||||
|
spring:
|
||||||
|
application:
|
||||||
|
# 应用名称
|
||||||
|
name: stwzhj-data2StKafka
|
||||||
|
profiles:
|
||||||
|
# 环境配置
|
||||||
|
active: @profiles.active@
|
||||||
|
autoconfigure:
|
||||||
|
exclude: org.springframework.boot.autoconfigure.elasticsearch.ElasticsearchRestClientAutoConfiguration
|
||||||
|
--- # nacos 配置
|
||||||
|
spring:
|
||||||
|
cloud:
|
||||||
|
nacos:
|
||||||
|
# nacos 服务地址
|
||||||
|
server-addr: @nacos.server@
|
||||||
|
username: @nacos.username@
|
||||||
|
password: @nacos.password@
|
||||||
|
discovery:
|
||||||
|
# 注册组
|
||||||
|
group: @nacos.discovery.group@
|
||||||
|
namespace: ${spring.profiles.active}
|
||||||
|
config:
|
||||||
|
# 配置组
|
||||||
|
group: @nacos.config.group@
|
||||||
|
namespace: ${spring.profiles.active}
|
||||||
|
config:
|
||||||
|
import:
|
||||||
|
- optional:nacos:application-common.yml
|
||||||
|
- optional:nacos:${spring.application.name}.yml
|
||||||
|
|
@ -0,0 +1,6 @@
|
||||||
|
security.protocol = SASL_PLAINTEXT
|
||||||
|
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
kafka.client.zookeeper.principal = zookeeper/hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||||
|
zookeeper.ssl.enable = false
|
||||||
|
sasl.kerberos.service.name = kafka
|
||||||
|
|
@ -0,0 +1,6 @@
|
||||||
|
security.protocol = SASL_PLAINTEXT
|
||||||
|
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
kafka.client.zookeeper.principal = zookeeper/hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||||
|
zookeeper.ssl.enable = false
|
||||||
|
sasl.kerberos.service.name = kafka
|
||||||
|
|
@ -0,0 +1,21 @@
|
||||||
|
config.storage.topic = connect-configs
|
||||||
|
group.id = connect-cluster
|
||||||
|
status.storage.topic = connect-status
|
||||||
|
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||||
|
internal.key.converter.schemas.enable = false
|
||||||
|
sasl.kerberos.service.name = kafka
|
||||||
|
rest.port = 21010
|
||||||
|
config.storage.replication.factor = 3
|
||||||
|
offset.flush.interval.ms = 10000
|
||||||
|
security.protocol = SASL_PLAINTEXT
|
||||||
|
key.converter.schemas.enable = false
|
||||||
|
internal.key.converter = org.apache.kafka.connect.storage.StringConverter
|
||||||
|
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
status.storage.replication.factor = 3
|
||||||
|
internal.value.converter.schemas.enable = false
|
||||||
|
value.converter.schemas.enable = false
|
||||||
|
internal.value.converter = org.apache.kafka.connect.storage.StringConverter
|
||||||
|
offset.storage.replication.factor = 3
|
||||||
|
offset.storage.topic = connect-offsets
|
||||||
|
value.converter = org.apache.kafka.connect.storage.StringConverter
|
||||||
|
key.converter = org.apache.kafka.connect.storage.StringConverter
|
||||||
|
|
@ -0,0 +1,20 @@
|
||||||
|
consumer.sasl.kerberos.service.name = kafka
|
||||||
|
producer.security.protocol = SASL_PLAINTEXT
|
||||||
|
standalone1.key.converter.schemas.enable = false
|
||||||
|
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||||
|
internal.key.converter.schemas.enable = false
|
||||||
|
sasl.kerberos.service.name = kafka
|
||||||
|
offset.flush.interval.ms = 10000
|
||||||
|
security.protocol = SASL_PLAINTEXT
|
||||||
|
internal.key.converter = org.apache.kafka.connect.storage.StringConverter
|
||||||
|
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
offset.storage.file.filename = /tmp/connect.offsets
|
||||||
|
producer.kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
internal.value.converter.schemas.enable = false
|
||||||
|
internal.value.converter = org.apache.kafka.connect.storage.StringConverter
|
||||||
|
value.converter.schemas.enable = false
|
||||||
|
consumer.security.protocol = SASL_PLAINTEXT
|
||||||
|
value.converter = org.apache.kafka.connect.storage.StringConverter
|
||||||
|
key.converter = org.apache.kafka.connect.storage.StringConverter
|
||||||
|
producer.sasl.kerberos.service.name = kafka
|
||||||
|
consumer.kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
|
@ -0,0 +1,5 @@
|
||||||
|
security.protocol = SASL_PLAINTEXT
|
||||||
|
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
group.id = example-group1
|
||||||
|
auto.commit.interval.ms = 60000
|
||||||
|
sasl.kerberos.service.name = kafka
|
||||||
|
|
@ -0,0 +1 @@
|
||||||
|
cluster.ip.model = IPV4
|
||||||
|
|
@ -0,0 +1 @@
|
||||||
|
kafka.client.security.mode = yes
|
||||||
|
|
@ -0,0 +1,49 @@
|
||||||
|
[kdcdefaults]
|
||||||
|
kdc_ports = 53.1.213.23:21732
|
||||||
|
kdc_tcp_ports = 53.1.213.23:21732
|
||||||
|
|
||||||
|
[libdefaults]
|
||||||
|
default_realm = A528C942_01A6_1BEF_7A75_0187DC82C40F.COM
|
||||||
|
kdc_timeout = 2500
|
||||||
|
clockskew = 300
|
||||||
|
use_dns_lookup = 0
|
||||||
|
udp_preference_limit = 1465
|
||||||
|
max_retries = 5
|
||||||
|
dns_lookup_kdc = false
|
||||||
|
dns_lookup_realm = false
|
||||||
|
renewable = false
|
||||||
|
forwardable = false
|
||||||
|
renew_lifetime = 0m
|
||||||
|
max_renewable_life = 30m
|
||||||
|
allow_extend_version = false
|
||||||
|
default_ccache_name = FILE:/tmp//krb5cc_%{uid}
|
||||||
|
|
||||||
|
[realms]
|
||||||
|
A528C942_01A6_1BEF_7A75_0187DC82C40F.COM = {
|
||||||
|
kdc = 53.1.213.23:21732
|
||||||
|
kdc = 53.1.213.22:21732
|
||||||
|
admin_server = 53.1.213.22:21730
|
||||||
|
admin_server = 53.1.213.23:21730
|
||||||
|
kpasswd_server = 53.1.213.22:21731
|
||||||
|
kpasswd_server = 53.1.213.23:21731
|
||||||
|
supported_enctypes = aes256-cts-hmac-sha1-96:special aes128-cts-hmac-sha1-96:special
|
||||||
|
kpasswd_port = 21731
|
||||||
|
kadmind_port = 21730
|
||||||
|
kadmind_listen = 53.1.213.23:21730
|
||||||
|
kpasswd_listen = 53.1.213.23:21731
|
||||||
|
renewable = false
|
||||||
|
forwardable = false
|
||||||
|
renew_lifetime = 0m
|
||||||
|
max_renewable_life = 30m
|
||||||
|
acl_file = /opt/huawei/Bigdata/FusionInsight_BASE_8.5.0/install/FusionInsight-kerberos-1.20/kerberos/var/krb5kdc/kadm5.acl
|
||||||
|
dict_file = /opt/huawei/Bigdata/common/runtime0/security/weakPasswdDic/weakPasswdForKdc.ini
|
||||||
|
key_stash_file = /opt/huawei/Bigdata/FusionInsight_BASE_8.5.0/install/FusionInsight-kerberos-1.20/kerberos/var/krb5kdc/.k5.A528C942_01A6_1BEF_7A75_0187DC82C40F.COM
|
||||||
|
}
|
||||||
|
|
||||||
|
[domain_realm]
|
||||||
|
.a528c942_01a6_1bef_7a75_0187dc82c40f.com = A528C942_01A6_1BEF_7A75_0187DC82C40F.COM
|
||||||
|
|
||||||
|
[logging]
|
||||||
|
kdc = SYSLOG:INFO:DAEMON
|
||||||
|
admin_server = SYSLOG:INFO:DAEMON
|
||||||
|
default = SYSLOG:NOTICE:DAEMON
|
||||||
|
|
@ -0,0 +1,5 @@
|
||||||
|
security.protocol = SASL_PLAINTEXT
|
||||||
|
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
acks = 1
|
||||||
|
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||||
|
sasl.kerberos.service.name = kafka
|
||||||
|
|
@ -0,0 +1,192 @@
|
||||||
|
log.cleaner.min.compaction.lag.ms = 0
|
||||||
|
quota.producer.default = 9223372036854775807
|
||||||
|
metric.reporters = com.huawei.bigdata.kafka.kafkabalancer.reporter.plugin.CoreMetricReporter
|
||||||
|
offsets.topic.num.partitions = 50
|
||||||
|
log.flush.interval.messages = 9223372036854775807
|
||||||
|
controller.socket.timeout.ms = 30000
|
||||||
|
auto.create.topics.enable = true
|
||||||
|
log.flush.interval.ms = 9223372036854775807
|
||||||
|
actual.broker.id.ip.map =
|
||||||
|
listener.name.sasl_plaintext.plain.sasl.server.callback.handler.class = com.huawei.kafka.plain.PlainCallBackHandler
|
||||||
|
replica.socket.receive.buffer.bytes = 65536
|
||||||
|
min.insync.replicas = 1
|
||||||
|
ssl.enable = false
|
||||||
|
replica.fetch.wait.max.ms = 500
|
||||||
|
num.recovery.threads.per.data.dir = 10
|
||||||
|
ssl.keystore.type = JKS
|
||||||
|
super.users = User:kafka
|
||||||
|
sasl.mechanism.inter.broker.protocol = GSSAPI
|
||||||
|
default.replication.factor = 2
|
||||||
|
log.preallocate = false
|
||||||
|
sasl.kerberos.principal.to.local.rules = RULE:[2:$1@$0](.*@.*)s/@.*//,RULE:[1:$1@$0](.*@*.COM)s/@.*//,DEFAULT
|
||||||
|
metrics.reporter.topic.replicas = 3
|
||||||
|
actual.broker.id.port.map =
|
||||||
|
fetch.purgatory.purge.interval.requests = 1000
|
||||||
|
replica.socket.timeout.ms = 30000
|
||||||
|
message.max.bytes = 100001200
|
||||||
|
max.connections.per.user = 2147483647
|
||||||
|
transactional.id.expiration.ms = 604800000
|
||||||
|
control.plane.listener.name = TRACE
|
||||||
|
transaction.state.log.replication.factor = 3
|
||||||
|
num.io.threads = 8
|
||||||
|
monitor.zk.ssl.connect = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002
|
||||||
|
offsets.commit.required.acks = -1
|
||||||
|
log.flush.offset.checkpoint.interval.ms = 60000
|
||||||
|
quota.window.size.seconds = 1
|
||||||
|
delete.topic.enable = true
|
||||||
|
ssl.truststore.type = JKS
|
||||||
|
offsets.commit.timeout.ms = 5000
|
||||||
|
quota.window.num = 11
|
||||||
|
log.partition.strategy = count
|
||||||
|
zookeeper.connect = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002/kafka
|
||||||
|
authorizer.class.name = org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer
|
||||||
|
auto.reassign.check.interval.ms = 600000
|
||||||
|
user.group.cache.timeout.sec = 300
|
||||||
|
auto.reassign.enable = true
|
||||||
|
num.replica.fetchers = 1
|
||||||
|
alter.log.dirs.replication.quota.window.size.seconds = 1
|
||||||
|
allow.everyone.if.no.acl.found = false
|
||||||
|
ip.mode = IPV4
|
||||||
|
alter.log.dirs.replication.quota.window.num = 11
|
||||||
|
log.roll.jitter.hours = 0
|
||||||
|
tmp.zookeeper.connect = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002
|
||||||
|
log.cleaner.enable = true
|
||||||
|
offsets.load.buffer.size = 5242880
|
||||||
|
log.cleaner.delete.retention.ms = 86400000
|
||||||
|
ssl.client.auth = none
|
||||||
|
controlled.shutdown.max.retries = 3
|
||||||
|
queued.max.requests = 500
|
||||||
|
metrics.reporter.max.request.size = 104857600
|
||||||
|
offsets.topic.replication.factor = 3
|
||||||
|
log.cleaner.threads = 1
|
||||||
|
transaction.state.log.min.isr = 2
|
||||||
|
sasl.kerberos.service.name = kafka
|
||||||
|
sasl.kerberos.ticket.renew.jitter = 0.05
|
||||||
|
socket.request.max.bytes = 104857600
|
||||||
|
zookeeper.session.timeout.ms = 45000
|
||||||
|
log.retention.bytes = -1
|
||||||
|
log.message.timestamp.type = CreateTime
|
||||||
|
request.total.time.ms.threshold = 30000
|
||||||
|
sasl.kerberos.min.time.before.relogin = 60000
|
||||||
|
zookeeper.set.acl = true
|
||||||
|
connections.max.idle.ms = 600000
|
||||||
|
offsets.retention.minutes = 10080
|
||||||
|
delegation.token.expiry.time.ms = 86400000
|
||||||
|
max.connections = 2147483647
|
||||||
|
is.security.mode = yes
|
||||||
|
transaction.state.log.num.partitions = 50
|
||||||
|
inter.broker.protocol.version = 3.6-IV1
|
||||||
|
replica.fetch.backoff.ms = 1000
|
||||||
|
kafka.metrics.reporters = com.huawei.kafka.PartitionStatusReporter
|
||||||
|
listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,TRACE:SASL_PLAINTEXT
|
||||||
|
log.retention.hours = 168
|
||||||
|
num.partitions = 2
|
||||||
|
listeners = SASL_PLAINTEXT://53.1.213.25:21007,PLAINTEXT://53.1.213.25:21005,SSL://53.1.213.25:21008,SASL_SSL://53.1.213.25:21009,TRACE://53.1.213.25:21013
|
||||||
|
ssl.enabled.protocols = TLSv1.2
|
||||||
|
delete.records.purgatory.purge.interval.requests = 1
|
||||||
|
monitor.zk.normal.connect = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002
|
||||||
|
ssl.cipher.suites = TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
|
||||||
|
log.flush.scheduler.interval.ms = 9223372036854775807
|
||||||
|
sasl.port = 21007
|
||||||
|
ssl.mode.enable = true
|
||||||
|
security.protocol = SASL_PLAINTEXT
|
||||||
|
log.index.size.max.bytes = 10485760
|
||||||
|
rack.aware.enable = false
|
||||||
|
security.inter.broker.protocol = SASL_PLAINTEXT
|
||||||
|
replica.fetch.max.bytes = 104857600
|
||||||
|
log.cleaner.dedupe.buffer.size = 134217728
|
||||||
|
replica.high.watermark.checkpoint.interval.ms = 5000
|
||||||
|
replication.quota.window.size.seconds = 1
|
||||||
|
log.cleaner.io.buffer.size = 524288
|
||||||
|
sasl.kerberos.ticket.renew.window.factor = 0.8
|
||||||
|
metrics.reporter.zookeeper.url = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002/kafka
|
||||||
|
max.connections.per.user.enable = true
|
||||||
|
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||||
|
metrics.reporter.sasl.kerberos.service.name = kafka
|
||||||
|
zookeeper.connection.timeout.ms = 45000
|
||||||
|
metrics.recording.level = INFO
|
||||||
|
metrics.reporter.bootstrap.servers = 53.1.213.27:21009,53.1.213.26:21009,53.1.213.25:21009
|
||||||
|
controlled.shutdown.retry.backoff.ms = 5000
|
||||||
|
sasl-ssl.port = 21009
|
||||||
|
advertised.broker.id.port.map =
|
||||||
|
listener.name.sasl_ssl.plain.sasl.server.callback.handler.class = com.huawei.kafka.plain.PlainCallBackHandler
|
||||||
|
log.roll.hours = 168
|
||||||
|
log.cleanup.policy = delete
|
||||||
|
log.flush.start.offset.checkpoint.interval.ms = 60000
|
||||||
|
host.name = 53.1.213.25
|
||||||
|
max.connections.per.user.overrides =
|
||||||
|
max.connections.per.user.whitelist = kafka,default#principal
|
||||||
|
transaction.state.log.segment.bytes = 104857600
|
||||||
|
max.connections.per.ip = 2147483647
|
||||||
|
offsets.topic.segment.bytes = 104857600
|
||||||
|
background.threads = 10
|
||||||
|
quota.consumer.default = 9223372036854775807
|
||||||
|
request.timeout.ms = 30000
|
||||||
|
log.message.format.version = 3.6-IV1
|
||||||
|
group.initial.rebalance.delay.ms = 3000
|
||||||
|
log.index.interval.bytes = 4096
|
||||||
|
log.segment.bytes = 1073741824
|
||||||
|
log.cleaner.backoff.ms = 15000
|
||||||
|
kafka.zookeeper.root = /kafka
|
||||||
|
offset.metadata.max.bytes = 4096
|
||||||
|
ssl.truststore.location = #{conf_dir}/truststore.jks
|
||||||
|
group.max.session.timeout.ms = 1800000
|
||||||
|
replica.fetch.response.max.bytes = 104857600
|
||||||
|
port = 21005
|
||||||
|
zookeeper.sync.time.ms = 2000
|
||||||
|
log.segment.delete.delay.ms = 60000
|
||||||
|
ssl.port = 21008
|
||||||
|
fetch.max.bytes = 115343360
|
||||||
|
user.group.query.retry.backoff.ms = 300
|
||||||
|
log.dirs = /srv/BigData/kafka/data1/kafka-logs,/srv/BigData/kafka/data2/kafka-logs,/srv/BigData/kafka/data3/kafka-logs,/srv/BigData/kafka/data4/kafka-logs
|
||||||
|
monitor.keytab = /opt/huawei/Bigdata/om-agent/nodeagent/etc/agent/omm.keytab
|
||||||
|
controlled.shutdown.enable = true
|
||||||
|
az.aware.enable = false
|
||||||
|
compression.type = producer
|
||||||
|
max.connections.per.ip.overrides =
|
||||||
|
log.message.timestamp.difference.max.ms = 9223372036854775807
|
||||||
|
metrics.reporter.kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
kafka.metrics.polling.interval.secs = 60
|
||||||
|
advertised.listeners.protocol = SASL_SSL
|
||||||
|
sasl.kerberos.kinit.cmd = /opt/huawei/Bigdata/FusionInsight_BASE_8.5.0/install/FusionInsight-kerberos-1.20/kerberos/bin/kinit
|
||||||
|
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
|
||||||
|
log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
|
||||||
|
auto.leader.rebalance.enable = true
|
||||||
|
leader.imbalance.check.interval.seconds = 3600
|
||||||
|
log.cleaner.min.cleanable.ratio = 0.5
|
||||||
|
user.group.query.retry = 10
|
||||||
|
replica.lag.time.max.ms = 60000
|
||||||
|
max.incremental.fetch.session.cache.slots = 1000
|
||||||
|
delegation.token.master.key = null
|
||||||
|
num.network.threads = 6
|
||||||
|
reserved.broker.max.id = 65535
|
||||||
|
listener.name.external_sasl_plaintext.plain.sasl.server.callback.handler.class = com.huawei.kafka.plain.PlainCallBackHandler
|
||||||
|
monitor.principal = oms/manager@A528C942_01A6_1BEF_7A75_0187DC82C40F.COM
|
||||||
|
transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
|
||||||
|
socket.send.buffer.bytes = 1024000
|
||||||
|
log.message.downconversion.enable = true
|
||||||
|
advertised.broker.id.ip.map =
|
||||||
|
metrics.reporter.security.protocol = SASL_SSL
|
||||||
|
transaction.state.log.load.buffer.size = 5242880
|
||||||
|
socket.receive.buffer.bytes = 1024000
|
||||||
|
ssl.keystore.location = #{conf_dir}/kafka_broker.jks
|
||||||
|
replica.fetch.min.bytes = 1
|
||||||
|
broker.rack = /default/rack0
|
||||||
|
controller.port = 21013
|
||||||
|
unclean.leader.election.enable = false
|
||||||
|
sasl.enabled.mechanisms = GSSAPI,PLAIN
|
||||||
|
group.min.session.timeout.ms = 6000
|
||||||
|
offsets.retention.check.interval.ms = 600000
|
||||||
|
log.cleaner.io.buffer.load.factor = 0.9
|
||||||
|
transaction.max.timeout.ms = 900000
|
||||||
|
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
producer.purgatory.purge.interval.requests = 1000
|
||||||
|
group.max.size = 2147483647
|
||||||
|
broker.id = 1
|
||||||
|
offsets.topic.compression.codec = 0
|
||||||
|
delegation.token.max.lifetime.ms = 604800000
|
||||||
|
replication.quota.window.num = 11
|
||||||
|
enable.advertised.listener = false
|
||||||
|
log.retention.check.interval.ms = 300000
|
||||||
|
leader.imbalance.per.broker.percentage = 10
|
||||||
|
queued.max.request.bytes = -1
|
||||||
Binary file not shown.
|
|
@ -0,0 +1,21 @@
|
||||||
|
config.storage.topic = connect-configs
|
||||||
|
group.id = connect-cluster
|
||||||
|
status.storage.topic = connect-status
|
||||||
|
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||||
|
internal.key.converter.schemas.enable = false
|
||||||
|
sasl.kerberos.service.name = kafka
|
||||||
|
rest.port = 21010
|
||||||
|
config.storage.replication.factor = 3
|
||||||
|
offset.flush.interval.ms = 10000
|
||||||
|
security.protocol = SASL_PLAINTEXT
|
||||||
|
key.converter.schemas.enable = false
|
||||||
|
internal.key.converter = org.apache.kafka.connect.storage.StringConverter
|
||||||
|
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
status.storage.replication.factor = 3
|
||||||
|
internal.value.converter.schemas.enable = false
|
||||||
|
value.converter.schemas.enable = false
|
||||||
|
internal.value.converter = org.apache.kafka.connect.storage.StringConverter
|
||||||
|
offset.storage.replication.factor = 3
|
||||||
|
offset.storage.topic = connect-offsets
|
||||||
|
value.converter = org.apache.kafka.connect.storage.StringConverter
|
||||||
|
key.converter = org.apache.kafka.connect.storage.StringConverter
|
||||||
|
|
@ -0,0 +1,20 @@
|
||||||
|
consumer.sasl.kerberos.service.name = kafka
|
||||||
|
producer.security.protocol = SASL_PLAINTEXT
|
||||||
|
standalone1.key.converter.schemas.enable = false
|
||||||
|
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||||
|
internal.key.converter.schemas.enable = false
|
||||||
|
sasl.kerberos.service.name = kafka
|
||||||
|
offset.flush.interval.ms = 10000
|
||||||
|
security.protocol = SASL_PLAINTEXT
|
||||||
|
internal.key.converter = org.apache.kafka.connect.storage.StringConverter
|
||||||
|
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
offset.storage.file.filename = /tmp/connect.offsets
|
||||||
|
producer.kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
internal.value.converter.schemas.enable = false
|
||||||
|
internal.value.converter = org.apache.kafka.connect.storage.StringConverter
|
||||||
|
value.converter.schemas.enable = false
|
||||||
|
consumer.security.protocol = SASL_PLAINTEXT
|
||||||
|
value.converter = org.apache.kafka.connect.storage.StringConverter
|
||||||
|
key.converter = org.apache.kafka.connect.storage.StringConverter
|
||||||
|
producer.sasl.kerberos.service.name = kafka
|
||||||
|
consumer.kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
|
@ -0,0 +1,5 @@
|
||||||
|
security.protocol = SASL_PLAINTEXT
|
||||||
|
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
group.id = example-group1
|
||||||
|
auto.commit.interval.ms = 60000
|
||||||
|
sasl.kerberos.service.name = kafka
|
||||||
|
|
@ -0,0 +1 @@
|
||||||
|
cluster.ip.model = IPV4
|
||||||
|
|
@ -0,0 +1 @@
|
||||||
|
kafka.client.security.mode = yes
|
||||||
|
|
@ -0,0 +1,49 @@
|
||||||
|
[kdcdefaults]
|
||||||
|
kdc_ports = 53.1.213.23:21732
|
||||||
|
kdc_tcp_ports = 53.1.213.23:21732
|
||||||
|
|
||||||
|
[libdefaults]
|
||||||
|
default_realm = A528C942_01A6_1BEF_7A75_0187DC82C40F.COM
|
||||||
|
kdc_timeout = 2500
|
||||||
|
clockskew = 300
|
||||||
|
use_dns_lookup = 0
|
||||||
|
udp_preference_limit = 1465
|
||||||
|
max_retries = 5
|
||||||
|
dns_lookup_kdc = false
|
||||||
|
dns_lookup_realm = false
|
||||||
|
renewable = false
|
||||||
|
forwardable = false
|
||||||
|
renew_lifetime = 0m
|
||||||
|
max_renewable_life = 30m
|
||||||
|
allow_extend_version = false
|
||||||
|
default_ccache_name = FILE:/tmp//krb5cc_%{uid}
|
||||||
|
|
||||||
|
[realms]
|
||||||
|
A528C942_01A6_1BEF_7A75_0187DC82C40F.COM = {
|
||||||
|
kdc = 53.1.213.23:21732
|
||||||
|
kdc = 53.1.213.22:21732
|
||||||
|
admin_server = 53.1.213.22:21730
|
||||||
|
admin_server = 53.1.213.23:21730
|
||||||
|
kpasswd_server = 53.1.213.22:21731
|
||||||
|
kpasswd_server = 53.1.213.23:21731
|
||||||
|
supported_enctypes = aes256-cts-hmac-sha1-96:special aes128-cts-hmac-sha1-96:special
|
||||||
|
kpasswd_port = 21731
|
||||||
|
kadmind_port = 21730
|
||||||
|
kadmind_listen = 53.1.213.23:21730
|
||||||
|
kpasswd_listen = 53.1.213.23:21731
|
||||||
|
renewable = false
|
||||||
|
forwardable = false
|
||||||
|
renew_lifetime = 0m
|
||||||
|
max_renewable_life = 30m
|
||||||
|
acl_file = /opt/huawei/Bigdata/FusionInsight_BASE_8.5.0/install/FusionInsight-kerberos-1.20/kerberos/var/krb5kdc/kadm5.acl
|
||||||
|
dict_file = /opt/huawei/Bigdata/common/runtime0/security/weakPasswdDic/weakPasswdForKdc.ini
|
||||||
|
key_stash_file = /opt/huawei/Bigdata/FusionInsight_BASE_8.5.0/install/FusionInsight-kerberos-1.20/kerberos/var/krb5kdc/.k5.A528C942_01A6_1BEF_7A75_0187DC82C40F.COM
|
||||||
|
}
|
||||||
|
|
||||||
|
[domain_realm]
|
||||||
|
.a528c942_01a6_1bef_7a75_0187dc82c40f.com = A528C942_01A6_1BEF_7A75_0187DC82C40F.COM
|
||||||
|
|
||||||
|
[logging]
|
||||||
|
kdc = SYSLOG:INFO:DAEMON
|
||||||
|
admin_server = SYSLOG:INFO:DAEMON
|
||||||
|
default = SYSLOG:NOTICE:DAEMON
|
||||||
|
|
@ -0,0 +1,49 @@
|
||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<configuration scan="true" scanPeriod="60 seconds" debug="false">
|
||||||
|
<!-- 日志存放路径 -->
|
||||||
|
<property name="log.path" value="logs" />
|
||||||
|
<property name="log.file" value="data2stKafka" />
|
||||||
|
<property name="MAX_FILE_SIZE" value="50MB" />
|
||||||
|
<property name="MAX_HISTORY" value="30" />
|
||||||
|
<!-- 日志输出格式 -->
|
||||||
|
<!-- INFO日志Appender -->
|
||||||
|
<appender name="FILE_INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||||
|
<file>${log.path}/info.${log.file}.log</file>
|
||||||
|
<filter class="ch.qos.logback.classic.filter.LevelFilter">
|
||||||
|
<level>INFO</level>
|
||||||
|
<onMatch>ACCEPT</onMatch>
|
||||||
|
<onMismatch>DENY</onMismatch>
|
||||||
|
</filter>
|
||||||
|
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||||
|
<fileNamePattern>${log.path}/info/info.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
|
||||||
|
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
|
||||||
|
<maxHistory>${MAX_HISTORY}</maxHistory>
|
||||||
|
</rollingPolicy>
|
||||||
|
<encoder>
|
||||||
|
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
|
||||||
|
</encoder>
|
||||||
|
</appender>
|
||||||
|
|
||||||
|
<!-- ERROR日志Appender -->
|
||||||
|
<appender name="FILE_ERROR" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||||
|
<file>${log.path}/error.${log.file}.log</file>
|
||||||
|
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||||
|
<level>ERROR</level>
|
||||||
|
</filter>
|
||||||
|
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||||
|
<fileNamePattern>${log.path}/error/error.${log.file}.%d{yyyy-MM-dd}.%i.log.gz</fileNamePattern>
|
||||||
|
<maxFileSize>${MAX_FILE_SIZE}</maxFileSize>
|
||||||
|
<maxHistory>${MAX_HISTORY}</maxHistory>
|
||||||
|
</rollingPolicy>
|
||||||
|
<encoder>
|
||||||
|
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
|
||||||
|
</encoder>
|
||||||
|
</appender>
|
||||||
|
|
||||||
|
<!-- 根Logger配置(禁用控制台输出) -->
|
||||||
|
<root level="INFO">
|
||||||
|
<appender-ref ref="FILE_INFO" />
|
||||||
|
<appender-ref ref="FILE_ERROR" />
|
||||||
|
</root>
|
||||||
|
|
||||||
|
</configuration>
|
||||||
|
|
@ -0,0 +1,5 @@
|
||||||
|
security.protocol = SASL_PLAINTEXT
|
||||||
|
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
acks = 1
|
||||||
|
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||||
|
sasl.kerberos.service.name = kafka
|
||||||
|
|
@ -0,0 +1,192 @@
|
||||||
|
log.cleaner.min.compaction.lag.ms = 0
|
||||||
|
quota.producer.default = 9223372036854775807
|
||||||
|
metric.reporters = com.huawei.bigdata.kafka.kafkabalancer.reporter.plugin.CoreMetricReporter
|
||||||
|
offsets.topic.num.partitions = 50
|
||||||
|
log.flush.interval.messages = 9223372036854775807
|
||||||
|
controller.socket.timeout.ms = 30000
|
||||||
|
auto.create.topics.enable = true
|
||||||
|
log.flush.interval.ms = 9223372036854775807
|
||||||
|
actual.broker.id.ip.map =
|
||||||
|
listener.name.sasl_plaintext.plain.sasl.server.callback.handler.class = com.huawei.kafka.plain.PlainCallBackHandler
|
||||||
|
replica.socket.receive.buffer.bytes = 65536
|
||||||
|
min.insync.replicas = 1
|
||||||
|
ssl.enable = false
|
||||||
|
replica.fetch.wait.max.ms = 500
|
||||||
|
num.recovery.threads.per.data.dir = 10
|
||||||
|
ssl.keystore.type = JKS
|
||||||
|
super.users = User:kafka
|
||||||
|
sasl.mechanism.inter.broker.protocol = GSSAPI
|
||||||
|
default.replication.factor = 2
|
||||||
|
log.preallocate = false
|
||||||
|
sasl.kerberos.principal.to.local.rules = RULE:[2:$1@$0](.*@.*)s/@.*//,RULE:[1:$1@$0](.*@*.COM)s/@.*//,DEFAULT
|
||||||
|
metrics.reporter.topic.replicas = 3
|
||||||
|
actual.broker.id.port.map =
|
||||||
|
fetch.purgatory.purge.interval.requests = 1000
|
||||||
|
replica.socket.timeout.ms = 30000
|
||||||
|
message.max.bytes = 100001200
|
||||||
|
max.connections.per.user = 2147483647
|
||||||
|
transactional.id.expiration.ms = 604800000
|
||||||
|
control.plane.listener.name = TRACE
|
||||||
|
transaction.state.log.replication.factor = 3
|
||||||
|
num.io.threads = 8
|
||||||
|
monitor.zk.ssl.connect = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002
|
||||||
|
offsets.commit.required.acks = -1
|
||||||
|
log.flush.offset.checkpoint.interval.ms = 60000
|
||||||
|
quota.window.size.seconds = 1
|
||||||
|
delete.topic.enable = true
|
||||||
|
ssl.truststore.type = JKS
|
||||||
|
offsets.commit.timeout.ms = 5000
|
||||||
|
quota.window.num = 11
|
||||||
|
log.partition.strategy = count
|
||||||
|
zookeeper.connect = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002/kafka
|
||||||
|
authorizer.class.name = org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer
|
||||||
|
auto.reassign.check.interval.ms = 600000
|
||||||
|
user.group.cache.timeout.sec = 300
|
||||||
|
auto.reassign.enable = true
|
||||||
|
num.replica.fetchers = 1
|
||||||
|
alter.log.dirs.replication.quota.window.size.seconds = 1
|
||||||
|
allow.everyone.if.no.acl.found = false
|
||||||
|
ip.mode = IPV4
|
||||||
|
alter.log.dirs.replication.quota.window.num = 11
|
||||||
|
log.roll.jitter.hours = 0
|
||||||
|
tmp.zookeeper.connect = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002
|
||||||
|
log.cleaner.enable = true
|
||||||
|
offsets.load.buffer.size = 5242880
|
||||||
|
log.cleaner.delete.retention.ms = 86400000
|
||||||
|
ssl.client.auth = none
|
||||||
|
controlled.shutdown.max.retries = 3
|
||||||
|
queued.max.requests = 500
|
||||||
|
metrics.reporter.max.request.size = 104857600
|
||||||
|
offsets.topic.replication.factor = 3
|
||||||
|
log.cleaner.threads = 1
|
||||||
|
transaction.state.log.min.isr = 2
|
||||||
|
sasl.kerberos.service.name = kafka
|
||||||
|
sasl.kerberos.ticket.renew.jitter = 0.05
|
||||||
|
socket.request.max.bytes = 104857600
|
||||||
|
zookeeper.session.timeout.ms = 45000
|
||||||
|
log.retention.bytes = -1
|
||||||
|
log.message.timestamp.type = CreateTime
|
||||||
|
request.total.time.ms.threshold = 30000
|
||||||
|
sasl.kerberos.min.time.before.relogin = 60000
|
||||||
|
zookeeper.set.acl = true
|
||||||
|
connections.max.idle.ms = 600000
|
||||||
|
offsets.retention.minutes = 10080
|
||||||
|
delegation.token.expiry.time.ms = 86400000
|
||||||
|
max.connections = 2147483647
|
||||||
|
is.security.mode = yes
|
||||||
|
transaction.state.log.num.partitions = 50
|
||||||
|
inter.broker.protocol.version = 3.6-IV1
|
||||||
|
replica.fetch.backoff.ms = 1000
|
||||||
|
kafka.metrics.reporters = com.huawei.kafka.PartitionStatusReporter
|
||||||
|
listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,TRACE:SASL_PLAINTEXT
|
||||||
|
log.retention.hours = 168
|
||||||
|
num.partitions = 2
|
||||||
|
listeners = SASL_PLAINTEXT://53.1.213.25:21007,PLAINTEXT://53.1.213.25:21005,SSL://53.1.213.25:21008,SASL_SSL://53.1.213.25:21009,TRACE://53.1.213.25:21013
|
||||||
|
ssl.enabled.protocols = TLSv1.2
|
||||||
|
delete.records.purgatory.purge.interval.requests = 1
|
||||||
|
monitor.zk.normal.connect = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002
|
||||||
|
ssl.cipher.suites = TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
|
||||||
|
log.flush.scheduler.interval.ms = 9223372036854775807
|
||||||
|
sasl.port = 21007
|
||||||
|
ssl.mode.enable = true
|
||||||
|
security.protocol = SASL_PLAINTEXT
|
||||||
|
log.index.size.max.bytes = 10485760
|
||||||
|
rack.aware.enable = false
|
||||||
|
security.inter.broker.protocol = SASL_PLAINTEXT
|
||||||
|
replica.fetch.max.bytes = 104857600
|
||||||
|
log.cleaner.dedupe.buffer.size = 134217728
|
||||||
|
replica.high.watermark.checkpoint.interval.ms = 5000
|
||||||
|
replication.quota.window.size.seconds = 1
|
||||||
|
log.cleaner.io.buffer.size = 524288
|
||||||
|
sasl.kerberos.ticket.renew.window.factor = 0.8
|
||||||
|
metrics.reporter.zookeeper.url = 53.1.213.24:24002,53.1.213.23:24002,53.1.213.22:24002/kafka
|
||||||
|
max.connections.per.user.enable = true
|
||||||
|
bootstrap.servers = 53.1.213.27:21007,53.1.213.26:21007,53.1.213.25:21007
|
||||||
|
metrics.reporter.sasl.kerberos.service.name = kafka
|
||||||
|
zookeeper.connection.timeout.ms = 45000
|
||||||
|
metrics.recording.level = INFO
|
||||||
|
metrics.reporter.bootstrap.servers = 53.1.213.27:21009,53.1.213.26:21009,53.1.213.25:21009
|
||||||
|
controlled.shutdown.retry.backoff.ms = 5000
|
||||||
|
sasl-ssl.port = 21009
|
||||||
|
advertised.broker.id.port.map =
|
||||||
|
listener.name.sasl_ssl.plain.sasl.server.callback.handler.class = com.huawei.kafka.plain.PlainCallBackHandler
|
||||||
|
log.roll.hours = 168
|
||||||
|
log.cleanup.policy = delete
|
||||||
|
log.flush.start.offset.checkpoint.interval.ms = 60000
|
||||||
|
host.name = 53.1.213.25
|
||||||
|
max.connections.per.user.overrides =
|
||||||
|
max.connections.per.user.whitelist = kafka,default#principal
|
||||||
|
transaction.state.log.segment.bytes = 104857600
|
||||||
|
max.connections.per.ip = 2147483647
|
||||||
|
offsets.topic.segment.bytes = 104857600
|
||||||
|
background.threads = 10
|
||||||
|
quota.consumer.default = 9223372036854775807
|
||||||
|
request.timeout.ms = 30000
|
||||||
|
log.message.format.version = 3.6-IV1
|
||||||
|
group.initial.rebalance.delay.ms = 3000
|
||||||
|
log.index.interval.bytes = 4096
|
||||||
|
log.segment.bytes = 1073741824
|
||||||
|
log.cleaner.backoff.ms = 15000
|
||||||
|
kafka.zookeeper.root = /kafka
|
||||||
|
offset.metadata.max.bytes = 4096
|
||||||
|
ssl.truststore.location = #{conf_dir}/truststore.jks
|
||||||
|
group.max.session.timeout.ms = 1800000
|
||||||
|
replica.fetch.response.max.bytes = 104857600
|
||||||
|
port = 21005
|
||||||
|
zookeeper.sync.time.ms = 2000
|
||||||
|
log.segment.delete.delay.ms = 60000
|
||||||
|
ssl.port = 21008
|
||||||
|
fetch.max.bytes = 115343360
|
||||||
|
user.group.query.retry.backoff.ms = 300
|
||||||
|
log.dirs = /srv/BigData/kafka/data1/kafka-logs,/srv/BigData/kafka/data2/kafka-logs,/srv/BigData/kafka/data3/kafka-logs,/srv/BigData/kafka/data4/kafka-logs
|
||||||
|
monitor.keytab = /opt/huawei/Bigdata/om-agent/nodeagent/etc/agent/omm.keytab
|
||||||
|
controlled.shutdown.enable = true
|
||||||
|
az.aware.enable = false
|
||||||
|
compression.type = producer
|
||||||
|
max.connections.per.ip.overrides =
|
||||||
|
log.message.timestamp.difference.max.ms = 9223372036854775807
|
||||||
|
metrics.reporter.kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
kafka.metrics.polling.interval.secs = 60
|
||||||
|
advertised.listeners.protocol = SASL_SSL
|
||||||
|
sasl.kerberos.kinit.cmd = /opt/huawei/Bigdata/FusionInsight_BASE_8.5.0/install/FusionInsight-kerberos-1.20/kerberos/bin/kinit
|
||||||
|
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
|
||||||
|
log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
|
||||||
|
auto.leader.rebalance.enable = true
|
||||||
|
leader.imbalance.check.interval.seconds = 3600
|
||||||
|
log.cleaner.min.cleanable.ratio = 0.5
|
||||||
|
user.group.query.retry = 10
|
||||||
|
replica.lag.time.max.ms = 60000
|
||||||
|
max.incremental.fetch.session.cache.slots = 1000
|
||||||
|
delegation.token.master.key = null
|
||||||
|
num.network.threads = 6
|
||||||
|
reserved.broker.max.id = 65535
|
||||||
|
listener.name.external_sasl_plaintext.plain.sasl.server.callback.handler.class = com.huawei.kafka.plain.PlainCallBackHandler
|
||||||
|
monitor.principal = oms/manager@A528C942_01A6_1BEF_7A75_0187DC82C40F.COM
|
||||||
|
transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
|
||||||
|
socket.send.buffer.bytes = 1024000
|
||||||
|
log.message.downconversion.enable = true
|
||||||
|
advertised.broker.id.ip.map =
|
||||||
|
metrics.reporter.security.protocol = SASL_SSL
|
||||||
|
transaction.state.log.load.buffer.size = 5242880
|
||||||
|
socket.receive.buffer.bytes = 1024000
|
||||||
|
ssl.keystore.location = #{conf_dir}/kafka_broker.jks
|
||||||
|
replica.fetch.min.bytes = 1
|
||||||
|
broker.rack = /default/rack0
|
||||||
|
controller.port = 21013
|
||||||
|
unclean.leader.election.enable = false
|
||||||
|
sasl.enabled.mechanisms = GSSAPI,PLAIN
|
||||||
|
group.min.session.timeout.ms = 6000
|
||||||
|
offsets.retention.check.interval.ms = 600000
|
||||||
|
log.cleaner.io.buffer.load.factor = 0.9
|
||||||
|
transaction.max.timeout.ms = 900000
|
||||||
|
kerberos.domain.name = hadoop.a528c942_01a6_1bef_7a75_0187dc82c40f.com
|
||||||
|
producer.purgatory.purge.interval.requests = 1000
|
||||||
|
group.max.size = 2147483647
|
||||||
|
broker.id = 1
|
||||||
|
offsets.topic.compression.codec = 0
|
||||||
|
delegation.token.max.lifetime.ms = 604800000
|
||||||
|
replication.quota.window.num = 11
|
||||||
|
enable.advertised.listener = false
|
||||||
|
log.retention.check.interval.ms = 300000
|
||||||
|
leader.imbalance.per.broker.percentage = 10
|
||||||
|
queued.max.request.bytes = -1
|
||||||
Binary file not shown.
|
|
@ -2,6 +2,7 @@ package org.dromara.data2es.config;
|
||||||
|
|
||||||
import org.apache.kafka.clients.admin.NewTopic;
|
import org.apache.kafka.clients.admin.NewTopic;
|
||||||
import org.apache.kafka.clients.producer.KafkaProducer;
|
import org.apache.kafka.clients.producer.KafkaProducer;
|
||||||
|
import org.apache.kafka.common.config.SslConfigs;
|
||||||
import org.dromara.data2es.producer.NewProducer;
|
import org.dromara.data2es.producer.NewProducer;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
@ -23,7 +24,7 @@ public class KafkaConfig {
|
||||||
|
|
||||||
private Logger logger = LoggerFactory.getLogger(KafkaConfig.class);
|
private Logger logger = LoggerFactory.getLogger(KafkaConfig.class);
|
||||||
|
|
||||||
private String kafkaServers = "53.1.212.25:21007,53.1.212.26:21007,53.1.212.27:21007"; //省厅 kafka
|
private String kafkaServers = "53.1.212.25:21009,53.1.212.26:21009,53.1.212.27:21009"; //省厅 kafka
|
||||||
// private String kafkaServers = "53.208.61.105:6667,53.208.61.106:6667,53.208.61.107:6667";//六安GA网
|
// private String kafkaServers = "53.208.61.105:6667,53.208.61.106:6667,53.208.61.107:6667";//六安GA网
|
||||||
// private String kafkaServers = "34.72.62.93:9092";//六安视频网
|
// private String kafkaServers = "34.72.62.93:9092";//六安视频网
|
||||||
// private String kafkaServers = "127.0.0.1:9092";//本地
|
// private String kafkaServers = "127.0.0.1:9092";//本地
|
||||||
|
|
@ -69,6 +70,10 @@ public class KafkaConfig {
|
||||||
*/
|
*/
|
||||||
private static final String USER_PRINCIPAL = "yhy_ahrs_rcw@A528C942_01A6_1BEF_7A75_0187DC82C40F.COM";
|
private static final String USER_PRINCIPAL = "yhy_ahrs_rcw@A528C942_01A6_1BEF_7A75_0187DC82C40F.COM";
|
||||||
|
|
||||||
|
private static final String USER_NAME = "yhy_ahrs_rcw";
|
||||||
|
|
||||||
|
private static final String PASS_WORD = "Ycgis@2509";
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* 新Producer 构造函数
|
* 新Producer 构造函数
|
||||||
* @param
|
* @param
|
||||||
|
|
@ -85,15 +90,23 @@ public class KafkaConfig {
|
||||||
{
|
{
|
||||||
logger.info("Securitymode start.");
|
logger.info("Securitymode start.");
|
||||||
//!!注意,安全认证时,需要用户手动修改为自己申请的机机账号
|
//!!注意,安全认证时,需要用户手动修改为自己申请的机机账号
|
||||||
LoginUtil.securityPrepare(USER_PRINCIPAL, USER_KEYTAB_FILE);
|
// LoginUtil.securityPrepare(USER_PRINCIPAL, USER_KEYTAB_FILE);
|
||||||
props.put(securityProtocol, "SASL_PLAINTEXT");
|
props.put(securityProtocol, "SASL_SSL");
|
||||||
// props.put("sasl.mechanism", "GSSAPI");
|
props.put("sasl.mechanism", "PLAIN"); // 使用 PLAIN 机制
|
||||||
// 服务名
|
|
||||||
props.put(saslKerberosServiceName, "kafka");
|
// SSL 配置 - 使用系统默认信任库
|
||||||
// 域名
|
props.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "/home/kafka.truststore.jks");
|
||||||
props.put(kerberosDomainName, "A528C942_01A6_1BEF_7A75_0187DC82C40F.COM");
|
props.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "Ycgis@2509");
|
||||||
|
props.put(SslConfigs.DEFAULT_SSL_KEYSTORE_TYPE, "JKS");
|
||||||
|
|
||||||
|
// PLAIN 机制的 JAAS 配置
|
||||||
|
String jaasConfig = "org.apache.kafka.common.security.plain.PlainLoginModule required "
|
||||||
|
+ "username=\"" + USER_NAME + "\" "
|
||||||
|
+ "password=\"" + PASS_WORD + "\";";
|
||||||
|
|
||||||
|
props.put("sasl.jaas.config", jaasConfig);
|
||||||
}
|
}
|
||||||
catch (IOException e)
|
catch (Exception e)
|
||||||
{
|
{
|
||||||
logger.error("Security prepare failure.");
|
logger.error("Security prepare failure.");
|
||||||
logger.error("The IOException occured.", e);
|
logger.error("The IOException occured.", e);
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue