Mirror of https://gitee.com/san-bing/JChargePointProtocol, synced 2026-05-07 11:29:53 +08:00
云快充 1.5.0 initialization (initial commit of the Kafka queue infrastructure)
KafkaAdmin.java
@@ -0,0 +1,94 @@
/**
 * Follow on Douyin (抖音): 程序员三丙
 * Knowledge Planet (知识星球): https://t.zsxq.com/j9b21
 */
package sanbing.jcpp.infrastructure.queue.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.errors.TopicExistsException;
import sanbing.jcpp.infrastructure.queue.QueueAdmin;
import sanbing.jcpp.infrastructure.util.property.PropertyUtils;

import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;

@Slf4j
public class KafkaAdmin implements QueueAdmin {

    private final KafkaSettings settings;
    private final Map<String, String> topicConfigs;
    private final int numPartitions;
    private volatile Set<String> topics;

    private final short replicationFactor;

    public KafkaAdmin(KafkaSettings settings, Map<String, String> topicConfigs) {
        this.settings = settings;
        this.topicConfigs = topicConfigs;
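
        // The "partitions" entry in topicConfigs carries the partition count for topics created by this admin;
        // it is removed here so that only real topic-level configs are passed to Kafka on topic creation.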
        String numPartitionsStr = topicConfigs.get(KafkaTopicConfigs.NUM_PARTITIONS_SETTING);
        if (numPartitionsStr != null) {
            numPartitions = Integer.parseInt(numPartitionsStr);
            topicConfigs.remove(KafkaTopicConfigs.NUM_PARTITIONS_SETTING);
        } else {
            numPartitions = 1;
        }
        replicationFactor = settings.getReplicationFactor();
    }

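    // Creates the topic only when it is not in the locally cached topic set; a concurrent create from another
    // node surfaces as a TopicExistsException cause, which is treated as success below.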
    @Override
    public void createTopicIfNotExists(String topic, String properties) {
        Set<String> topics = getTopics();
        if (topics.contains(topic)) {
            return;
        }
        try {
            NewTopic newTopic = new NewTopic(topic, numPartitions, replicationFactor).configs(PropertyUtils.getProps(topicConfigs, properties));
            createTopic(newTopic).values().get(topic).get();
            topics.add(topic);
        } catch (ExecutionException ee) {
            switch (ee.getCause()) {
                case TopicExistsException ignored -> {
                    // do nothing
                }
                case null, default -> {
                    log.warn("[{}] Failed to create topic", topic, ee);
                    throw new RuntimeException(ee);
                }
            }
        } catch (Exception e) {
            log.warn("[{}] Failed to create topic", topic, e);
            throw new RuntimeException(e);
        }
    }

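    // Lazily loads the names of all existing topics once, using double-checked locking on the volatile "topics" field.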
    private Set<String> getTopics() {
        if (topics == null) {
            synchronized (this) {
                if (topics == null) {
                    topics = ConcurrentHashMap.newKeySet();
                    try {
                        topics.addAll(settings.getAdminClient().listTopics().names().get());
                    } catch (InterruptedException | ExecutionException e) {
                        log.error("Failed to get all topics.", e);
                    }
                }
            }
        }
        return topics;
    }

    public CreateTopicsResult createTopic(NewTopic topic) {
        return settings.getAdminClient().createTopics(Collections.singletonList(topic));
    }

    @Override
    public void destroy() {
    }

}
KafkaConsumerStatisticConfig.java
@@ -0,0 +1,29 @@
/**
 * Follow on Douyin (抖音): 程序员三丙
 * Knowledge Planet (知识星球): https://t.zsxq.com/j9b21
 */
package sanbing.jcpp.infrastructure.queue.kafka;

import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Component;

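// Settings for the periodic consumer-lag log, bound from queue.kafka.consumer-stats.*; the bean only exists when queue.type=kafka.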
@Component
@ConditionalOnProperty(prefix = "queue", value = "type", havingValue = "kafka")
@Getter
@AllArgsConstructor
@NoArgsConstructor
public class KafkaConsumerStatisticConfig {

    @Value("${queue.kafka.consumer-stats.enabled:true}")
    private Boolean enabled;

    @Value("${queue.kafka.consumer-stats.print-interval-ms:60000}")
    private Long printIntervalMs;

    @Value("${queue.kafka.consumer-stats.kafka-response-timeout-ms:1000}")
    private Long kafkaResponseTimeoutMs;
}
KafkaConsumerStatsService.java
@@ -0,0 +1,158 @@
/**
 * Follow on Douyin (抖音): 程序员三丙
 * Knowledge Planet (知识星球): https://t.zsxq.com/j9b21
 */
package sanbing.jcpp.infrastructure.queue.kafka;

import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import lombok.Builder;
import lombok.Data;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Component;
import sanbing.jcpp.infrastructure.util.async.JCPPThreadFactory;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

@Slf4j
@Component
@RequiredArgsConstructor
@ConditionalOnProperty(prefix = "queue", value = "type", havingValue = "kafka")
public class KafkaConsumerStatsService {

    private final Set<String> monitoredGroups = ConcurrentHashMap.newKeySet();

    private final KafkaSettings kafkaSettings;
    private final KafkaConsumerStatisticConfig statsConfig;

    private Consumer<String, byte[]> consumer;
    private ScheduledExecutorService statsPrintScheduler;

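    // Creates a dedicated consumer that is used only to read partition end offsets for lag calculation, then starts
    // the logging schedule. Skipped entirely when queue.kafka.consumer-stats.enabled=false.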
    @PostConstruct
    public void init() {
        if (!statsConfig.getEnabled()) {
            return;
        }
        this.statsPrintScheduler = Executors.newSingleThreadScheduledExecutor(JCPPThreadFactory.forName("kafka-consumer-stats", Thread.MAX_PRIORITY));

        Properties consumerProps = kafkaSettings.toConsumerProps(null);
        consumerProps.put(ConsumerConfig.CLIENT_ID_CONFIG, "consumer-stats-loader-client");
        consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer-stats-loader-client-group");
        this.consumer = new KafkaConsumer<>(consumerProps);

        startLogScheduling();
    }

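    // Every print-interval-ms, for each registered consumer group: read the group's committed offsets through the
    // admin client, read the partition end offsets through the stats consumer, and log every partition that still has lag.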
    private void startLogScheduling() {
        Duration timeoutDuration = Duration.ofMillis(statsConfig.getKafkaResponseTimeoutMs());
        statsPrintScheduler.scheduleWithFixedDelay(() -> {
            if (!isStatsPrintRequired()) {
                return;
            }
            for (String groupId : monitoredGroups) {
                try {
                    Map<TopicPartition, OffsetAndMetadata> groupOffsets = kafkaSettings.getAdminClient().listConsumerGroupOffsets(groupId).partitionsToOffsetAndMetadata()
                            .get(statsConfig.getKafkaResponseTimeoutMs(), TimeUnit.MILLISECONDS);
                    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(groupOffsets.keySet(), timeoutDuration);

                    List<GroupTopicStats> lagTopicsStats = getTopicsStatsWithLag(groupOffsets, endOffsets);
                    if (!lagTopicsStats.isEmpty()) {
                        StringBuilder builder = new StringBuilder();
                        for (int i = 0; i < lagTopicsStats.size(); i++) {
                            builder.append(lagTopicsStats.get(i).toString());
                            if (i != lagTopicsStats.size() - 1) {
                                builder.append(", ");
                            }
                        }
                        log.info("[{}] Topic partitions with lag: [{}].", groupId, builder);
                    }
                } catch (Exception e) {
                    log.warn("[{}] Failed to get consumer group stats. Reason - {}.", groupId, e.getMessage());
                    log.trace("Detailed error: ", e);
                }
            }

        }, statsConfig.getPrintIntervalMs(), statsConfig.getPrintIntervalMs(), TimeUnit.MILLISECONDS);
    }

    private boolean isStatsPrintRequired() {
        return log.isInfoEnabled();
    }

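    // Lag per partition is endOffset - committedOffset; only partitions with non-zero lag are reported.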
    private List<GroupTopicStats> getTopicsStatsWithLag(Map<TopicPartition, OffsetAndMetadata> groupOffsets, Map<TopicPartition, Long> endOffsets) {
        List<GroupTopicStats> consumerGroupStats = new ArrayList<>();
        for (TopicPartition topicPartition : groupOffsets.keySet()) {
            long endOffset = endOffsets.get(topicPartition);
            long committedOffset = groupOffsets.get(topicPartition).offset();
            long lag = endOffset - committedOffset;
            if (lag != 0) {
                GroupTopicStats groupTopicStats = GroupTopicStats.builder()
                        .topic(topicPartition.topic())
                        .partition(topicPartition.partition())
                        .committedOffset(committedOffset)
                        .endOffset(endOffset)
                        .lag(lag)
                        .build();
                consumerGroupStats.add(groupTopicStats);
            }
        }
        return consumerGroupStats;
    }

    public void registerClientGroup(String groupId) {
        if (statsConfig.getEnabled() && !StringUtils.isEmpty(groupId)) {
            monitoredGroups.add(groupId);
        }
    }

    public void unregisterClientGroup(String groupId) {
        if (statsConfig.getEnabled() && !StringUtils.isEmpty(groupId)) {
            monitoredGroups.remove(groupId);
        }
    }

    @PreDestroy
    public void destroy() {
        if (statsPrintScheduler != null) {
            statsPrintScheduler.shutdownNow();
        }
        if (consumer != null) {
            consumer.close();
        }
    }


    @Builder
    @Data
    private static class GroupTopicStats {
        private String topic;
        private int partition;
        private long committedOffset;
        private long endOffset;
        private long lag;

        @Override
        public String toString() {
            return "[" +
                    "topic=[" + topic + "]" +
                    ", partition=[" + partition + "]" +
                    ", committedOffset=[" + committedOffset + "]" +
                    ", endOffset=[" + endOffset + "]" +
                    ", lag=[" + lag + "]" +
                    "]";
        }
    }
}
KafkaConsumerTemplate.java
@@ -0,0 +1,117 @@
/**
 * Follow on Douyin (抖音): 程序员三丙
 * Knowledge Planet (知识星球): https://t.zsxq.com/j9b21
 */
package sanbing.jcpp.infrastructure.queue.kafka;

import lombok.Builder;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.springframework.util.StopWatch;
import sanbing.jcpp.infrastructure.queue.AbstractQueueConsumerTemplate;
import sanbing.jcpp.infrastructure.queue.KafkaQueueMsg;
import sanbing.jcpp.infrastructure.queue.QueueAdmin;
import sanbing.jcpp.infrastructure.queue.QueueMsg;

import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;

@Slf4j
public class KafkaConsumerTemplate<T extends QueueMsg> extends AbstractQueueConsumerTemplate<ConsumerRecord<String, byte[]>, T> {

    private final QueueAdmin admin;
    private final KafkaConsumer<String, byte[]> consumer;
    private final KafkaDecoder<T> decoder;

    private final KafkaConsumerStatsService statsService;
    private final String groupId;

    @Builder
    private KafkaConsumerTemplate(KafkaSettings settings, KafkaDecoder<T> decoder,
                                  String clientId, String groupId, String topic,
                                  QueueAdmin admin, KafkaConsumerStatsService statsService) {
        super(topic);
        Properties props = settings.toConsumerProps(topic);
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId);
        if (groupId != null) {
            props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        }

        this.statsService = statsService;
        this.groupId = groupId;

        if (statsService != null) {
            statsService.registerClientGroup(groupId);
        }

        this.admin = admin;
        this.consumer = new KafkaConsumer<>(props);
        this.decoder = decoder;
    }

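    // Topics are auto-created through the QueueAdmin before subscribing; an empty topic list means unsubscribe.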
    @Override
    protected void doSubscribe(List<String> topicNames) {
        if (!topicNames.isEmpty()) {
            topicNames.forEach(admin::createTopicIfNotExists);
            consumer.subscribe(topicNames);
        } else {
            log.info("unsubscribe due to empty topic list");
            consumer.unsubscribe();
        }
    }

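    // Blocking poll for up to durationInMillis; the StopWatch timing is only logged at TRACE level.
    // Records are copied into a plain List so callers do not depend on Kafka's ConsumerRecords type.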
    @Override
    protected List<ConsumerRecord<String, byte[]>> doPoll(long durationInMillis) {
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();

        log.trace("poll topic {} maxDuration {}", getTopic(), durationInMillis);

        ConsumerRecords<String, byte[]> records = consumer.poll(Duration.ofMillis(durationInMillis));

        stopWatch.stop();
        log.trace("poll topic {} took {}ms", getTopic(), stopWatch.getTotalTimeMillis());

        if (records.isEmpty()) {
            return Collections.emptyList();
        } else {
            List<ConsumerRecord<String, byte[]>> recordList = new ArrayList<>(256);
            records.forEach(recordList::add);
            return recordList;
        }
    }

    @Override
    public T decode(ConsumerRecord<String, byte[]> record) throws IOException {
        return decoder.decode(new KafkaQueueMsg(record));
    }

    @Override
    protected void doCommit() {
        consumer.commitSync();
    }

    @Override
    protected void doUnsubscribe() {
        if (consumer != null) {
            consumer.unsubscribe();
            consumer.close();
        }
        if (statsService != null) {
            statsService.unregisterClientGroup(groupId);
        }
    }

    @Override
    public boolean isLongPollingSupported() {
        return true;
    }

}
KafkaDecoder.java
@@ -0,0 +1,16 @@
/**
 * Follow on Douyin (抖音): 程序员三丙
 * Knowledge Planet (知识星球): https://t.zsxq.com/j9b21
 */
package sanbing.jcpp.infrastructure.queue.kafka;

import sanbing.jcpp.infrastructure.queue.QueueMsg;

import java.io.IOException;

public interface KafkaDecoder<T> {

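    // Turns a raw Kafka record, wrapped as a QueueMsg, into the typed message T; throws IOException if the payload cannot be parsed.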
    T decode(QueueMsg msg) throws IOException;

}
KafkaProducerTemplate.java
@@ -0,0 +1,133 @@
/**
 * Follow on Douyin (抖音): 程序员三丙
 * Knowledge Planet (知识星球): https://t.zsxq.com/j9b21
 */
package sanbing.jcpp.infrastructure.queue.kafka;

import lombok.Builder;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import sanbing.jcpp.infrastructure.queue.QueueAdmin;
import sanbing.jcpp.infrastructure.queue.QueueCallback;
import sanbing.jcpp.infrastructure.queue.QueueMsg;
import sanbing.jcpp.infrastructure.queue.QueueProducer;
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;

import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Objects;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

@Slf4j
public class KafkaProducerTemplate<T extends QueueMsg> implements QueueProducer<T> {

    private final KafkaProducer<String, byte[]> producer;

    @Getter
    private final String topic;

    @Getter
    private final KafkaSettings settings;

    private final QueueAdmin admin;

    private final Set<TopicPartitionInfo> topics;

    @Getter
    private final String clientId;

    @Builder
    private KafkaProducerTemplate(KafkaSettings settings, String topic, String clientId, QueueAdmin admin) {
        Properties props = settings.toProducerProps(topic);

        this.clientId = Objects.requireNonNull(clientId, "Kafka producer client.id is null");
        if (!StringUtils.isEmpty(clientId)) {
            props.put(ProducerConfig.CLIENT_ID_CONFIG, clientId);
        }
        this.settings = settings;

        this.producer = new KafkaProducer<>(props);
        this.topic = topic;
        this.admin = admin;
        topics = ConcurrentHashMap.newKeySet();
    }

    @Override
    public void init() {
    }

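    // Adds diagnostic headers (_producerId, _threadName and, at TRACE level, the caller's stack trace) to an outgoing record;
    // invoked from send() only when DEBUG logging is enabled.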
    void addAnalyticHeaders(List<Header> headers) {
        headers.add(new RecordHeader("_producerId", getClientId().getBytes(StandardCharsets.UTF_8)));
        headers.add(new RecordHeader("_threadName", Thread.currentThread().getName().getBytes(StandardCharsets.UTF_8)));
        if (log.isTraceEnabled()) {
            try {
                StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
                int maxLevel = Math.min(stackTrace.length, 20);
                for (int i = 2; i < maxLevel; i++) { // ignore two levels: getStackTrace and addAnalyticHeaders
                    headers.add(new RecordHeader("_stackTrace" + i, stackTrace[i].toString().getBytes(StandardCharsets.UTF_8)));
                }
            } catch (Throwable t) {
                log.trace("Failed to add stacktrace headers in Kafka producer {}", getClientId(), t);
            }
        }
    }

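    // Sends asynchronously: the topic is created on first use, message headers are copied onto the record,
    // and the Kafka completion callback is translated into QueueCallback.onSuccess/onFailure.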
    @Override
    public void send(TopicPartitionInfo tpi, T msg, QueueCallback callback) {
        try {
            createTopicIfNotExist(tpi);
            String key = msg.getKey();
            byte[] data = msg.getData();
            ProducerRecord<String, byte[]> record;
            List<Header> headers = msg.getHeaders().getData().entrySet().stream().map(e -> new RecordHeader(e.getKey(), e.getValue())).collect(Collectors.toList());
            if (log.isDebugEnabled()) {
                addAnalyticHeaders(headers);
            }
            record = new ProducerRecord<>(tpi.getFullTopicName(), null, key, data, headers);
            producer.send(record, (metadata, exception) -> {
                if (exception == null) {
                    if (callback != null) {
                        callback.onSuccess(new KafkaQueueMsgMetadata(metadata));
                    }
                } else {
                    if (callback != null) {
                        callback.onFailure(exception);
                    } else {
                        log.warn("Producer template failure: {}", exception.getMessage(), exception);
                    }
                }
            });
        } catch (Exception e) {
            if (callback != null) {
                callback.onFailure(e);
            } else {
                log.warn("Producer template failure (send method wrapper): {}", e.getMessage(), e);
            }
            throw e;
        }
    }

    private void createTopicIfNotExist(TopicPartitionInfo tpi) {
        if (topics.contains(tpi)) {
            return;
        }
        admin.createTopicIfNotExists(tpi.getFullTopicName());
        topics.add(tpi);
    }

    @Override
    public void stop() {
        if (producer != null) {
            producer.close();
        }
    }
}
KafkaQueueMsgMetadata.java
@@ -0,0 +1,17 @@
/**
 * Follow on Douyin (抖音): 程序员三丙
 * Knowledge Planet (知识星球): https://t.zsxq.com/j9b21
 */
package sanbing.jcpp.infrastructure.queue.kafka;

import lombok.AllArgsConstructor;
import lombok.Data;
import org.apache.kafka.clients.producer.RecordMetadata;
import sanbing.jcpp.infrastructure.queue.QueueMsgMetadata;

@Data
@AllArgsConstructor
public class KafkaQueueMsgMetadata implements QueueMsgMetadata {

    private RecordMetadata metadata;
}
KafkaSettings.java
@@ -0,0 +1,210 @@
/**
 * Follow on Douyin (抖音): 程序员三丙
 * Knowledge Planet (知识星球): https://t.zsxq.com/j9b21
 */
package sanbing.jcpp.infrastructure.queue.kafka;

import jakarta.annotation.PreDestroy;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.config.SslConfigs;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;
import sanbing.jcpp.infrastructure.util.property.JCPPProperty;
import sanbing.jcpp.infrastructure.util.property.PropertyUtils;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

@Slf4j
@ConditionalOnProperty(prefix = "queue", value = "type", havingValue = "kafka")
@ConfigurationProperties(prefix = "queue.kafka")
@Component
public class KafkaSettings {

    @Value("${queue.kafka.bootstrap-servers}")
    private String servers;

    @Value("${queue.kafka.ssl.enabled:false}")
    private boolean sslEnabled;

    @Value("${queue.kafka.ssl.truststore-location:}")
    private String sslTruststoreLocation;

    @Value("${queue.kafka.ssl.truststore-password:}")
    private String sslTruststorePassword;

    @Value("${queue.kafka.ssl.keystore-location:}")
    private String sslKeystoreLocation;

    @Value("${queue.kafka.ssl.keystore-password:}")
    private String sslKeystorePassword;

    @Value("${queue.kafka.ssl.key-password:}")
    private String sslKeyPassword;

    @Value("${queue.kafka.acks:all}")
    private String acks;

    @Value("${queue.kafka.retries:1}")
    private int retries;

    @Value("${queue.kafka.compression-type:none}")
    private String compressionType;

    @Value("${queue.kafka.batch-size:16384}")
    private int batchSize;

    @Value("${queue.kafka.linger-ms:1}")
    private long lingerMs;

    @Value("${queue.kafka.max-request-size:1048576}")
    private int maxRequestSize;

    @Value("${queue.kafka.max-in-flight-requests-per-connection:5}")
    private int maxInFlightRequestsPerConnection;

    @Value("${queue.kafka.buffer-memory:33554432}")
    private long bufferMemory;

    @Value("${queue.kafka.replication-factor:1}")
    @Getter
    private short replicationFactor;

    @Value("${queue.kafka.max-poll-records:8192}")
    private int maxPollRecords;

    @Value("${queue.kafka.max-poll-interval-ms:300000}")
    private int maxPollIntervalMs;

    @Value("${queue.kafka.max-partition-fetch-bytes:16777216}")
    private int maxPartitionFetchBytes;

    @Value("${queue.kafka.fetch-max-bytes:134217728}")
    private int fetchMaxBytes;

    @Value("${queue.kafka.request-timeout-ms:30000}")
    private int requestTimeoutMs;

    @Value("${queue.kafka.session-timeout-ms:10000}")
    private int sessionTimeoutMs;

    @Value("${queue.kafka.auto-offset-reset:earliest}")
    private String autoOffsetReset;

    @Value("${queue.kafka.other-inline:}")
    private String otherInline;


    @Setter
    private Map<String, List<JCPPProperty>> consumerPropertiesPerTopic = Collections.emptyMap();
    @Setter
    private Map<String, List<JCPPProperty>> producerPropertiesPerTopic = Collections.emptyMap();

    private volatile AdminClient adminClient;

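    // Consumer properties: shared base props plus consumer-specific settings; auto-commit is disabled because the
    // consumer template commits explicitly, and per-topic overrides from consumerPropertiesPerTopic are applied last.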
    public Properties toConsumerProps(String topic) {
        Properties props = toProps();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        props.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, maxPartitionFetchBytes);
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, fetchMaxBytes);
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalMs);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);

        consumerPropertiesPerTopic
                .getOrDefault(topic, Collections.emptyList())
                .forEach(kv -> props.put(kv.getKey(), kv.getValue()));

        return props;
    }

    public Properties toProducerProps(String topic) {
        Properties props = toProps();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        props.put(ProducerConfig.ACKS_CONFIG, acks);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        props.put(ProducerConfig.LINGER_MS_CONFIG, lingerMs);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
        props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, compressionType);
        props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, maxRequestSize);
        props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, maxInFlightRequestsPerConnection);

        producerPropertiesPerTopic
                .getOrDefault(topic, Collections.emptyList())
                .forEach(kv -> props.put(kv.getKey(), kv.getValue()));

        return props;
    }

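    // Base properties shared by consumers, producers and the admin client: timeouts, any inline extras parsed from
    // queue.kafka.other-inline, and SSL settings when enabled.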
    Properties toProps() {
        Properties props = new Properties();

        props.put(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG, requestTimeoutMs);
        props.put(CommonClientConfigs.SESSION_TIMEOUT_MS_CONFIG, sessionTimeoutMs);

        props.putAll(PropertyUtils.getProps(otherInline));

        configureSSL(props);

        return props;
    }

    void configureSSL(Properties props) {
        if (sslEnabled) {
            props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
            props.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, sslTruststoreLocation);
            props.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, sslTruststorePassword);
            props.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, sslKeystoreLocation);
            props.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, sslKeystorePassword);
            props.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, sslKeyPassword);
        }
    }

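    // Lazily creates a single shared AdminClient, using double-checked locking on the volatile field.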
    public AdminClient getAdminClient() {
        if (adminClient == null) {
            synchronized (this) {
                if (adminClient == null) {
                    adminClient = AdminClient.create(toAdminProps());
                }
            }
        }
        return adminClient;
    }

    protected Properties toAdminProps() {
        Properties props = toProps();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(AdminClientConfig.RETRIES_CONFIG, retries);
        return props;
    }

    @PreDestroy
    private void destroy() {
        if (adminClient != null) {
            adminClient.close();
        }
    }

}
KafkaTopicConfigs.java
@@ -0,0 +1,32 @@
/**
 * Follow on Douyin (抖音): 程序员三丙
 * Knowledge Planet (知识星球): https://t.zsxq.com/j9b21
 */
package sanbing.jcpp.infrastructure.queue.kafka;

import jakarta.annotation.PostConstruct;
import lombok.Getter;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Component;
import sanbing.jcpp.infrastructure.util.property.PropertyUtils;

import java.util.Map;

@Component
@ConditionalOnProperty(prefix = "queue", value = "type", havingValue = "kafka")
public class KafkaTopicConfigs {
    public static final String NUM_PARTITIONS_SETTING = "partitions";

    @Value("${queue.kafka.topic-properties.app:}")
    private String appProperties;

    @Getter
    private Map<String, String> appConfigs;

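    // Parses the inline queue.kafka.topic-properties.app string into a config map once at startup.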
    @PostConstruct
    private void init() {
        this.appConfigs = PropertyUtils.getProps(appProperties);
    }

}