云快充1.5.0 初始化

This commit is contained in:
3god
2024-10-08 09:38:54 +08:00
parent dea6774942
commit cb19b45919
297 changed files with 18020 additions and 28 deletions

View File

@@ -0,0 +1,52 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Douyin (TikTok China): Programmer Sanbing (程序员三丙)
Knowledge Planet: https://t.zsxq.com/j9b21
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>sanbing</groupId>
<artifactId>jcpp-parent</artifactId>
<version>1.0.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>jcpp-infrastructure-queue</artifactId>
<packaging>jar</packaging>
<name>JChargePointProtocol Infrastructure Queue Module</name>
<description>基础MQ管理模块</description>
<properties>
<main.dir>${basedir}/..</main.dir>
</properties>
<dependencies>
<!-- Kafka client: the message-queue transport used by this module -->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
</dependency>
<!-- Internal JCPP modules (versions managed by the parent POM) -->
<dependency>
<groupId>sanbing</groupId>
<artifactId>jcpp-infrastructure-util</artifactId>
</dependency>
<dependency>
<groupId>sanbing</groupId>
<artifactId>jcpp-infrastructure-proto</artifactId>
</dependency>
<dependency>
<groupId>sanbing</groupId>
<artifactId>jcpp-infrastructure-stats</artifactId>
</dependency>
<!-- Curator recipes: ZooKeeper-based service discovery (ZkDiscoveryProvider) -->
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
</dependency>
</dependencies>
</project>

View File

@@ -0,0 +1,190 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue;
import jakarta.annotation.Nonnull;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
import static java.util.Collections.emptyList;
/**
 * Template for queue consumers: buffers (re)subscription requests and applies
 * them lazily on the next {@link #poll(long)}, then converts transport records
 * of type {@code R} into messages of type {@code T}.
 * <p>
 * Thread-safety: poll/commit/unsubscribe serialize on {@code consumerLock};
 * subscription requests are enqueued lock-free via {@code subscribeQueue}.
 */
@Slf4j
public abstract class AbstractQueueConsumerTemplate<R, T extends QueueMsg> implements QueueConsumer<T>{
// One millisecond in nanos; minimum remaining time worth sleeping for.
public static final long ONE_MILLISECOND_IN_NANOS = TimeUnit.MILLISECONDS.toNanos(1);
// True once doSubscribe() has run for the latest partition set.
private volatile boolean subscribed;
protected volatile boolean stopped = false;
// Latest partition set taken from subscribeQueue; null until the first subscribe.
protected volatile Set<TopicPartitionInfo> partitions;
protected final ReentrantLock consumerLock = new ReentrantLock(); //NonfairSync
// Pending (re)subscription requests; drained inside poll() under the lock.
final Queue<Set<TopicPartitionInfo>> subscribeQueue = new ConcurrentLinkedQueue<>();
@Getter
private final String topic;
public AbstractQueueConsumerTemplate(String topic) {
this.topic = topic;
}
/** Requests subscription to the whole topic (no explicit partition). */
@Override
public void subscribe() {
log.debug("enqueue topic subscribe {} ", topic);
if (stopped) {
log.error("trying subscribe, but consumer stopped for topic {}", topic);
return;
}
subscribeQueue.add(Collections.singleton(new TopicPartitionInfo(topic, null, true)));
}
/** Requests subscription to an explicit partition set; applied on the next poll. */
@Override
public void subscribe(Set<TopicPartitionInfo> partitions) {
log.debug("enqueue topics subscribe {} ", partitions);
if (stopped) {
log.error("trying subscribe, but consumer stopped for topic {}", topic);
return;
}
subscribeQueue.add(partitions);
}
/**
 * Polls for up to {@code durationInMillis} ms: applies any queued subscription
 * changes, delegates to {@link #doPoll(long)}, and decodes the raw records.
 * When nothing is available (and the transport does not long-poll) it sleeps
 * out the remaining interval and returns an empty list.
 */
@Override
public List<T> poll(long durationInMillis) {
List<R> records;
long startNanos = System.nanoTime();
if (stopped) {
log.error("poll invoked but consumer stopped for topic " + topic, new RuntimeException("stacktrace"));
return emptyList();
}
// Never subscribed and nothing queued: just burn the poll interval.
if (!subscribed && partitions == null && subscribeQueue.isEmpty()) {
return sleepAndReturnEmpty(startNanos, durationInMillis);
}
if (consumerLock.isLocked()) {
log.error("poll. consumerLock is locked. will wait with no timeout. it looks like a race conditions or deadlock topic " + topic, new RuntimeException("stacktrace"));
}
consumerLock.lock();
try {
// Drain all queued subscription requests; only the most recent set wins.
while (!subscribeQueue.isEmpty()) {
subscribed = false;
partitions = subscribeQueue.poll();
}
if (!subscribed) {
List<String> topicNames = getFullTopicNames();
log.info("Subscribing to topics {}", topicNames);
doSubscribe(topicNames);
subscribed = true;
}
records = partitions.isEmpty() ? emptyList() : doPoll(durationInMillis);
} finally {
consumerLock.unlock();
}
if (records.isEmpty() && !isLongPollingSupported()) {
return sleepAndReturnEmpty(startNanos, durationInMillis);
}
return decodeRecords(records);
}
/** Decodes raw records, skipping nulls; a decode failure aborts the whole batch. */
@Nonnull
List<T> decodeRecords(@Nonnull List<R> records) {
List<T> result = new ArrayList<>(records.size());
records.forEach(record -> {
try {
if (record != null) {
result.add(decode(record));
}
} catch (IOException e) {
log.error("Failed decode record: [{}]", record);
throw new RuntimeException("Failed to decode record: ", e);
}
});
return result;
}
/** Sleeps for whatever remains of the poll interval, then returns an empty list. */
List<T> sleepAndReturnEmpty(final long startNanos, final long durationInMillis) {
long durationNanos = TimeUnit.MILLISECONDS.toNanos(durationInMillis);
long spentNanos = System.nanoTime() - startNanos;
long nanosLeft = durationNanos - spentNanos;
if (nanosLeft >= ONE_MILLISECOND_IN_NANOS) {
try {
long sleepMs = TimeUnit.NANOSECONDS.toMillis(nanosLeft);
log.trace("Going to sleep after poll: topic {} for {}ms", topic, sleepMs);
Thread.sleep(sleepMs);
} catch (InterruptedException e) {
// NOTE(review): interrupt status is not restored here — consider
// Thread.currentThread().interrupt() so callers can observe it.
if (!stopped) {
log.error("Failed to wait", e);
}
}
}
return emptyList();
}
/** Commits consumed offsets via {@link #doCommit()} under the consumer lock. */
@Override
public void commit() {
if (consumerLock.isLocked()) {
log.error("commit. consumerLock is locked. will wait with no timeout. it looks like a race conditions or deadlock topic " + topic, new RuntimeException("stacktrace"));
}
consumerLock.lock();
try {
doCommit();
} finally {
consumerLock.unlock();
}
}
@Override
public void stop() {
stopped = true;
}
/** Stops the consumer and, if currently subscribed, unsubscribes from the transport. */
@Override
public void unsubscribe() {
log.info("Unsubscribing and stopping consumer for topics {}", getFullTopicNames());
stopped = true;
consumerLock.lock();
try {
if (subscribed) {
doUnsubscribe();
}
} finally {
consumerLock.unlock();
}
}
@Override
public boolean isStopped() {
return stopped;
}
// Transport-specific hooks implemented by concrete consumers (e.g. Kafka).
abstract protected List<R> doPoll(long durationInMillis);
abstract protected T decode(R record) throws IOException;
abstract protected void doSubscribe(List<String> topicNames);
abstract protected void doCommit();
abstract protected void doUnsubscribe();
/** Full topic names (topic[.partition]) of the current partition set. */
@Override
public List<String> getFullTopicNames() {
if (partitions == null) {
return Collections.emptyList();
}
return partitions.stream().map(TopicPartitionInfo::getFullTopicName).collect(Collectors.toList());
}
/** Transports whose doPoll blocks (long polling) override this to skip the sleep. */
protected boolean isLongPollingSupported() {
return false;
}
}

View File

@@ -0,0 +1,26 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue;
/**
 * Minimal success/failure notification contract used by pack processing.
 */
public interface Callback {

    /** Invoked when the operation completed successfully. */
    void onSuccess();

    /** Invoked when the operation failed, with the causing throwable. */
    void onFailure(Throwable t);

    /** Shared no-op instance for callers that do not care about the outcome. */
    Callback EMPTY = new Callback() {
        @Override
        public void onSuccess() {
            // intentionally empty
        }

        @Override
        public void onFailure(Throwable t) {
            // intentionally empty
        }
    };
}

View File

@@ -0,0 +1,23 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue;
import lombok.Data;
/**
 * Defensive copy of an arbitrary {@link QueueMsg}: the key and payload are
 * taken as-is, while the headers are copied into a fresh
 * {@link DefaultQueueMsgHeaders} so later mutation of the source is isolated.
 */
@Data
public class DefaultQueueMsg implements QueueMsg {
    private final String key;
    private final byte[] data;
    private final DefaultQueueMsgHeaders headers;

    public DefaultQueueMsg(QueueMsg msg) {
        this.key = msg.getKey();
        this.data = msg.getData();
        DefaultQueueMsgHeaders copied = new DefaultQueueMsgHeaders();
        for (var entry : msg.getHeaders().getData().entrySet()) {
            copied.put(entry.getKey(), entry.getValue());
        }
        this.headers = copied;
    }
}

View File

@@ -0,0 +1,29 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue;
import java.util.HashMap;
import java.util.Map;
/**
 * Simple in-memory header map. Backed by a plain HashMap, so it is not
 * thread-safe; null keys/values are permitted by the backing map.
 */
public class DefaultQueueMsgHeaders implements QueueMsgHeaders {
// Protected so subclasses can access the backing storage directly.
protected final Map<String, byte[]> data = new HashMap<>();
/** Stores a header, returning the previous value for the key (or null). */
@Override
public byte[] put(String key, byte[] value) {
return data.put(key, value);
}
@Override
public byte[] get(String key) {
return data.get(key);
}
/** Returns the live internal map — caller mutations affect these headers directly. */
@Override
public Map<String, byte[]> getData() {
return data;
}
}

View File

@@ -0,0 +1,38 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
 * {@link QueueMsg} view over a Kafka {@link ConsumerRecord}: the key, the
 * value bytes and every record header are captured at construction time.
 */
public class KafkaQueueMsg implements QueueMsg {

    private final String key;
    private final QueueMsgHeaders headers;
    private final byte[] data;

    public KafkaQueueMsg(ConsumerRecord<String, byte[]> consumerRecord) {
        this.key = consumerRecord.key();
        QueueMsgHeaders copiedHeaders = new DefaultQueueMsgHeaders();
        for (var header : consumerRecord.headers()) {
            copiedHeaders.put(header.key(), header.value());
        }
        this.headers = copiedHeaders;
        this.data = consumerRecord.value();
    }

    @Override
    public String getKey() {
        return key;
    }

    @Override
    public QueueMsgHeaders getHeaders() {
        return headers;
    }

    @Override
    public byte[] getData() {
        return data;
    }
}

View File

@@ -0,0 +1,32 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue;
import lombok.extern.slf4j.Slf4j;
import java.util.UUID;
/**
 * {@link Callback} adapter that reports the outcome of one in-flight message
 * back to its owning {@link PackProcessingContext}, keyed by the message id.
 */
@Slf4j
public class PackCallback<T> implements Callback {

    private final UUID id;
    private final PackProcessingContext<T> ctx;

    public PackCallback(UUID id, PackProcessingContext<T> ctx) {
        this.id = id;
        this.ctx = ctx;
    }

    @Override
    public void onSuccess() {
        log.trace("[{}] ON SUCCESS", id);
        ctx.onSuccess(id);
    }

    @Override
    public void onFailure(Throwable t) {
        log.trace("[{}] ON FAILURE", id, t);
        ctx.onFailure(id, t);
    }
}

View File

@@ -0,0 +1,75 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import java.util.UUID;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Tracks acknowledgement state for a pack (batch) of in-flight messages.
 * Each message starts in {@code ackMap}; success removes it, failure moves it
 * to {@code failedMap}. Once every message has been accounted for, the
 * processing latch is released so {@link #await} returns.
 * <p>
 * Thread-safe: the maps are concurrent and the pending counter is atomic, so
 * callbacks may arrive from multiple threads.
 */
@Slf4j
public class PackProcessingContext<T> {
    // Messages still awaiting an ack; initialized from the ackMap size.
    private final AtomicInteger pendingCount;
    // Released once every message has been acked (successfully or not).
    private final CountDownLatch processingTimeoutLatch;
    @Getter
    private final ConcurrentMap<UUID, T> ackMap;
    @Getter
    private final ConcurrentMap<UUID, T> failedMap;

    public PackProcessingContext(CountDownLatch processingTimeoutLatch,
                                 ConcurrentMap<UUID, T> ackMap,
                                 ConcurrentMap<UUID, T> failedMap) {
        this.processingTimeoutLatch = processingTimeoutLatch;
        this.pendingCount = new AtomicInteger(ackMap.size());
        this.ackMap = ackMap;
        this.failedMap = failedMap;
    }

    /**
     * Waits until every message in the pack has been acked or the timeout elapses.
     *
     * @param packProcessingTimeout maximum time to wait
     * @param unit                  time unit of {@code packProcessingTimeout}
     *                              (renamed from the misleading "milliseconds" —
     *                              callers may pass any unit)
     * @return true if all messages were acked before the timeout
     * @throws InterruptedException if the waiting thread is interrupted
     */
    public boolean await(long packProcessingTimeout, TimeUnit unit) throws InterruptedException {
        return processingTimeoutLatch.await(packProcessingTimeout, unit);
    }

    /** Marks the message as successfully processed; releases the latch on the last ack. */
    public void onSuccess(UUID id) {
        boolean empty = false;
        T msg = ackMap.remove(id);
        if (msg != null) {
            // Only decrement for a genuine first ack; duplicate acks are ignored.
            empty = pendingCount.decrementAndGet() == 0;
        }
        if (empty) {
            processingTimeoutLatch.countDown();
        } else {
            if (log.isTraceEnabled()) {
                log.trace("Items left: {}", ackMap.size());
                for (T t : ackMap.values()) {
                    log.trace("left item: {}", t);
                }
            }
        }
    }

    /** Marks the message as failed: moves it to failedMap and counts it as handled. */
    public void onFailure(UUID id, Throwable t) {
        boolean empty = false;
        T msg = ackMap.remove(id);
        if (msg != null) {
            empty = pendingCount.decrementAndGet() == 0;
            failedMap.put(id, msg);
            if (log.isTraceEnabled()) {
                log.trace("Items left: {}", ackMap.size());
                for (T v : ackMap.values()) {
                    log.trace("left item: {}", v);
                }
            }
        }
        if (empty) {
            processingTimeoutLatch.countDown();
        }
    }
}

View File

@@ -0,0 +1,40 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue;
import lombok.Data;
/**
 * {@link QueueMsg} backed by a protobuf message. The payload is serialized
 * on every {@link #getData()} call via {@code toByteArray()}, so callers that
 * need the bytes repeatedly should cache them.
 */
@Data
public class ProtoQueueMsg<T extends com.google.protobuf.GeneratedMessageV3> implements QueueMsg {
private final String key;
// Protected so subclasses can reach the wrapped proto directly.
protected final T value;
private final QueueMsgHeaders headers;
/** Convenience constructor: starts with empty default headers. */
public ProtoQueueMsg(String key, T value) {
this(key, value, new DefaultQueueMsgHeaders());
}
public ProtoQueueMsg(String key, T value, QueueMsgHeaders headers) {
this.key = key;
this.value = value;
this.headers = headers;
}
@Override
public String getKey() {
return key;
}
@Override
public QueueMsgHeaders getHeaders() {
return headers;
}
/** Serializes the wrapped proto; a fresh byte[] is produced on each call. */
@Override
public byte[] getData() {
return value.toByteArray();
}
}

View File

@@ -0,0 +1,17 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue;
/**
 * Administrative operations against the underlying message-queue broker.
 */
public interface QueueAdmin {
/** Creates the topic with broker-default properties if it does not exist yet. */
default void createTopicIfNotExists(String topic) {
createTopicIfNotExists(topic, null);
}
/** Creates the topic if missing; {@code properties} may be null for broker defaults. */
void createTopicIfNotExists(String topic, String properties);
/** Releases any admin-client/broker resources held by this instance. */
void destroy();
}

View File

@@ -0,0 +1,12 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue;
/**
 * Completion callback for a single queue send operation.
 */
public interface QueueCallback {
/** Invoked when the message was accepted; {@code metadata} is transport-specific. */
void onSuccess(QueueMsgMetadata metadata);
/** Invoked when the send failed, with the causing throwable. */
void onFailure(Throwable t);
}

View File

@@ -0,0 +1,34 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue;
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
import java.util.List;
import java.util.Set;
/**
 * Consumer side of the queue abstraction, parameterized by message type.
 */
public interface QueueConsumer<T extends QueueMsg> {
/** Base topic this consumer reads from. */
String getTopic();
/** Subscribes to the whole topic (no explicit partitions). */
void subscribe();
/** Subscribes to an explicit set of topic partitions. */
void subscribe(Set<TopicPartitionInfo> partitions);
/** Stops the consumer; subsequent polls return empty. */
void stop();
/** Unsubscribes from the transport and stops consuming. */
void unsubscribe();
/** Polls for messages for up to the given duration in milliseconds. */
List<T> poll(long durationInMillis);
/** Commits offsets of messages consumed so far. */
void commit();
boolean isStopped();
/** Full topic names (topic[.partition]) currently subscribed to. */
List<String> getFullTopicNames();
}

View File

@@ -0,0 +1,14 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue;
/**
 * A single queue message: routing key, headers and opaque payload bytes.
 */
public interface QueueMsg {
/** Routing/partitioning key for the message. */
String getKey();
/** Message metadata headers. */
QueueMsgHeaders getHeaders();
/** Raw payload bytes. */
byte[] getData();
}

View File

@@ -0,0 +1,16 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue;
import java.util.Map;
/**
 * String-keyed binary headers attached to a {@link QueueMsg}.
 */
public interface QueueMsgHeaders {
/** Stores a header value, returning the previous value for the key (or null). */
byte[] put(String key, byte[] value);
/** Returns the header value for the key, or null if absent. */
byte[] get(String key);
/** Returns the header map — implementations may expose live internal state. */
Map<String, byte[]> getData();
}

View File

@@ -0,0 +1,8 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue;
/**
 * Marker for transport-specific metadata returned after a successful send
 * (see {@link QueueCallback#onSuccess}).
 */
public interface QueueMsgMetadata {
}

View File

@@ -0,0 +1,19 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue;
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
/**
 * Producer side of the queue abstraction, parameterized by message type.
 */
public interface QueueProducer<T extends QueueMsg> {
/** Initializes transport resources; must be called before the first send. */
void init();
/** Default topic this producer writes to. */
String getTopic();
/** Sends {@code msg} to the given topic partition; the outcome is reported via {@code callback}. */
void send(TopicPartitionInfo tpi, T msg, QueueCallback callback);
/** Releases transport resources; no sends are expected afterwards. */
void stop();
}

View File

@@ -0,0 +1,13 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.common;
/**
 * Per-queue consumer settings.
 */
public interface QueueConfig {
/** Whether to run one dedicated consumer per partition instead of a shared consumer. */
boolean isConsumerPerPartition();
/** Interval between consecutive polls (presumably milliseconds — confirm against consumers). */
int getPollInterval();
}

View File

@@ -0,0 +1,17 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.common;
/**
 * Message-metadata header names shared by queue producers and consumers.
 *
 * @author baigod
 */
public final class QueueConstants {

    /** Common prefix for JCPP message-metadata header keys. */
    public static final String MSG_MD_PREFIX = "jcpp_";
    /** Header key "ts" (timestamp metadata). */
    public static final String MSG_MD_TS = "ts";
    /** Header key carrying the originating protocol session id. */
    public static final String MSG_MD_PROTOCOL_SESSION_ID = "protocol_session_id";

    // Constants holder: prevent instantiation.
    private QueueConstants() {
        throw new UnsupportedOperationException("QueueConstants is a constants holder");
    }
}

View File

@@ -0,0 +1,59 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.common;
import lombok.Builder;
import lombok.Getter;
import lombok.ToString;
import java.util.Objects;
import java.util.Optional;
/**
 * Immutable descriptor of a queue topic plus an optional partition index.
 * The full topic name is "topic" or "topic.partition" when a partition is set.
 * Note: equality compares topic, partition and full name but deliberately
 * ignores the {@code myPartition} ownership flag; hashCode uses only the
 * full topic name, which is consistent with equals.
 */
@ToString
public class TopicPartitionInfo {
    @Getter
    private final String topic;
    private final Integer partition;
    @Getter
    private final String fullTopicName;
    @Getter
    private final boolean myPartition;

    @Builder
    public TopicPartitionInfo(String topic, Integer partition, boolean myPartition) {
        this.topic = topic;
        this.partition = partition;
        this.myPartition = myPartition;
        this.fullTopicName = partition == null ? topic : topic + "." + partition;
    }

    /** Copy with a different topic but the same partition and ownership flag. */
    public TopicPartitionInfo newByTopic(String topic) {
        return new TopicPartitionInfo(topic, partition, myPartition);
    }

    public Optional<Integer> getPartition() {
        return Optional.ofNullable(partition);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        TopicPartitionInfo other = (TopicPartitionInfo) o;
        return topic.equals(other.topic)
                && Objects.equals(partition, other.partition)
                && fullTopicName.equals(other.fullTopicName);
    }

    @Override
    public int hashCode() {
        return Objects.hash(fullTopicName);
    }
}

View File

@@ -0,0 +1,109 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.discovery;
import jakarta.annotation.PostConstruct;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.RandomStringUtils;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import org.springframework.util.StringUtils;
import sanbing.jcpp.infrastructure.util.SystemUtil;
import sanbing.jcpp.proto.gen.ClusterProto.ServiceInfo;
import sanbing.jcpp.proto.gen.ClusterProto.SystemInfoProto;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
/**
 * Default {@link ServiceInfoProvider}: resolves the service id (explicit
 * config, local hostname, or random fallback), computes the web-API endpoint,
 * and builds the {@link ServiceInfo} proto enriched with current system
 * metrics (CPU, memory, disk).
 *
 * @author baigod
 */
@Component
@Slf4j
public class DefaultServiceInfoProvider implements ServiceInfoProvider {

    @Value("${service.id:#{null}}")
    @Getter
    private String serviceId;

    @Getter
    @Value("${service.type:monolith}")
    private String serviceType;

    // Roles this node plays; all of them when running as a monolith.
    private List<ServiceType> serviceTypes;
    private ServiceInfo serviceInfo;

    @Getter
    private String serviceWebapiEndpoint;

    @Value("${server.port}")
    private String webapiPort;

    @PostConstruct
    public void init() throws UnknownHostException {
        // Service id precedence: explicit config -> local hostname -> random string.
        if (!StringUtils.hasText(this.serviceId)) {
            try {
                this.serviceId = InetAddress.getLocalHost().getHostName();
            } catch (UnknownHostException e) {
                this.serviceId = RandomStringUtils.randomAlphabetic(10);
            }
        }
        log.info("Current Service ID: {}", this.serviceId);
        serviceWebapiEndpoint = InetAddress.getLocalHost().getHostAddress() + ":" + webapiPort;
        log.info("Current Service HostAddress: {}", this.serviceWebapiEndpoint);
        // "monolith" means this node plays every role; otherwise exactly one role.
        if (serviceType.equalsIgnoreCase("monolith")) {
            serviceTypes = List.of(ServiceType.values());
        } else {
            serviceTypes = Collections.singletonList(ServiceType.of(serviceType));
        }
        generateNewServiceInfoWithCurrentSystemInfo();
    }

    @Override
    public boolean isMonolith() {
        // Case-insensitive to stay consistent with init(), which uses equalsIgnoreCase;
        // previously a case-sensitive equals() disagreed with init() for e.g. "Monolith".
        return "monolith".equalsIgnoreCase(getServiceType());
    }

    @Override
    public ServiceInfo getServiceInfo() {
        return serviceInfo;
    }

    /** Rebuilds the cached ServiceInfo with a fresh snapshot of system metrics. */
    @Override
    public ServiceInfo generateNewServiceInfoWithCurrentSystemInfo() {
        ServiceInfo.Builder builder = ServiceInfo.newBuilder()
                .setServiceId(serviceId)
                .addAllServiceTypes(serviceTypes.stream().map(ServiceType::name).collect(Collectors.toList()))
                .setSystemInfo(getCurrentSystemInfoProto());
        return serviceInfo = builder.build();
    }

    /** Snapshot of current CPU/memory/disk usage; absent metrics are simply skipped. */
    private SystemInfoProto getCurrentSystemInfoProto() {
        SystemInfoProto.Builder builder = SystemInfoProto.newBuilder();
        SystemUtil.getCpuUsage().ifPresent(builder::setCpuUsage);
        SystemUtil.getMemoryUsage().ifPresent(builder::setMemoryUsage);
        SystemUtil.getDiscSpaceUsage().ifPresent(builder::setDiskUsage);
        SystemUtil.getCpuCount().ifPresent(builder::setCpuCount);
        SystemUtil.getTotalMemory().ifPresent(builder::setTotalMemory);
        SystemUtil.getTotalDiscSpace().ifPresent(builder::setTotalDiscSpace);
        return builder.build();
    }
}

View File

@@ -0,0 +1,15 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.discovery;
import sanbing.jcpp.proto.gen.ClusterProto.ServiceInfo;
import java.util.List;
/**
 * Cluster discovery: enumerates the other live service nodes.
 */
public interface DiscoveryProvider {
/** Returns the ServiceInfo of every other known server; empty when running standalone. */
List<ServiceInfo> getOtherServers();
}

View File

@@ -0,0 +1,41 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.discovery;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.event.ApplicationReadyEvent;
import org.springframework.stereotype.Service;
import sanbing.jcpp.infrastructure.util.annotation.AfterStartUp;
import sanbing.jcpp.proto.gen.ClusterProto.ServiceInfo;
import java.util.Collections;
import java.util.List;
/**
 * No-op discovery used when ZooKeeper is disabled ({@code zk.enabled=false}
 * or unset): there are no other servers, and partition ownership is computed
 * once at startup for the local node alone.
 */
@Service
@ConditionalOnProperty(prefix = "zk", value = "enabled", havingValue = "false", matchIfMissing = true)
@Slf4j
public class DummyDiscoveryProvider implements DiscoveryProvider {
private final ServiceInfoProvider serviceInfoProvider;
private final PartitionProvider partitionProvider;
public DummyDiscoveryProvider(ServiceInfoProvider serviceInfoProvider, PartitionProvider partitionProvider) {
this.serviceInfoProvider = serviceInfoProvider;
this.partitionProvider = partitionProvider;
}
/** Assigns all partitions to this node once the application is ready. */
@AfterStartUp(order = AfterStartUp.DISCOVERY_SERVICE)
public void onApplicationEvent(ApplicationReadyEvent event) {
partitionProvider.recalculatePartitions(serviceInfoProvider.getServiceInfo(), Collections.emptyList());
}
@Override
public List<ServiceInfo> getOtherServers() {
return Collections.emptyList();
}
}

View File

@@ -0,0 +1,199 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.discovery;
import com.google.common.hash.HashFunction;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.stereotype.Component;
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
import sanbing.jcpp.infrastructure.queue.discovery.event.PartitionChangeEvent;
import sanbing.jcpp.proto.gen.ClusterProto.ServiceInfo;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;
import static sanbing.jcpp.infrastructure.util.JCPPHashUtil.forName;
import static sanbing.jcpp.infrastructure.util.JCPPHashUtil.hash;
/**
 * Hash-based {@link PartitionProvider}: maps entity ids / pile codes to topic
 * partitions via a configurable Guava hash function, and recalculates which
 * partitions this node owns whenever cluster membership changes, publishing a
 * {@link PartitionChangeEvent} for every affected service type.
 *
 * @author baigod
 */
@Component
@Slf4j
@ConfigurationProperties("queue.partitions")
public class HashPartitionProvider implements PartitionProvider {
@Value("${queue.app.topic}")
private String appTopic;
@Value("${queue.app.partitions:10}")
private Integer appPartitions;
// Name of the Guava hash function, resolved through JCPPHashUtil.forName().
@Value("${queue.partitions.hash_function_name:murmur3_128}")
private String hashFunctionName;
// queue key -> topic name
private final ConcurrentMap<QueueKey, String> partitionTopicsMap = new ConcurrentHashMap<>();
// queue key -> number of partitions for that queue
private final ConcurrentMap<QueueKey, Integer> partitionSizesMap = new ConcurrentHashMap<>();
private HashFunction hashFunction;
// Partitions currently owned by this node; replaced wholesale on each recalculation.
protected volatile ConcurrentMap<QueueKey, List<Integer>> myPartitions = new ConcurrentHashMap<>();
@Resource
private ApplicationEventPublisher applicationEventPublisher;
@PostConstruct
public void init() {
this.hashFunction = forName(hashFunctionName);
// Only the APP queue is registered here at present.
QueueKey appKey = new QueueKey(ServiceType.APP);
partitionTopicsMap.put(appKey, appTopic);
partitionSizesMap.put(appKey, appPartitions);
}
/** Maps a precomputed hash onto a partition index in [0, partitionSize). */
private TopicPartitionInfo resolve(QueueKey queueKey, int hash) {
Integer partitionSize = partitionSizesMap.get(queueKey);
if (partitionSize == null) {
throw new IllegalStateException("Partitions info for queue " + queueKey + " is missing");
}
int partition = Math.abs(hash % partitionSize);
return buildTopicPartitionInfo(queueKey, partition);
}
/** Resolves the partition for an entity id within the given service queue. */
@Override
public TopicPartitionInfo resolve(ServiceType serviceType, String queueName, UUID entityId) {
QueueKey queueKey = getQueueKey(serviceType, queueName);
return resolve(queueKey, hash(hashFunction, entityId));
}
/** Resolves the partition for a charging-pile code within the given service queue. */
@Override
public TopicPartitionInfo resolve(ServiceType serviceType, String queueName, String pileCode) {
QueueKey queueKey = getQueueKey(serviceType, queueName);
return resolve(queueKey, hash(hashFunction, pileCode));
}
// Falls back to the service type's main queue when the named queue is unknown.
private QueueKey getQueueKey(ServiceType serviceType, String queueName) {
QueueKey queueKey = new QueueKey(serviceType, queueName);
if (!partitionSizesMap.containsKey(queueKey)) {
queueKey = new QueueKey(serviceType);
}
return queueKey;
}
/**
 * Recomputes partition ownership across the cluster: servers are sorted by
 * service id, each partition i is owned by servers[i % count], and a
 * PartitionChangeEvent is published for every queue whose ownership changed.
 */
@Override
public synchronized void recalculatePartitions(ServiceInfo currentService, List<ServiceInfo> otherServices) {
log.info("Recalculating partitions");
logServiceInfo(currentService);
otherServices.forEach(this::logServiceInfo);
Map<QueueKey, List<ServiceInfo>> queueServicesMap = new HashMap<>();
addNode(currentService, queueServicesMap);
for (ServiceInfo other : otherServices) {
addNode(other, queueServicesMap);
}
// Deterministic assignment requires a stable server order on every node.
queueServicesMap.values().forEach(list -> list.sort(Comparator.comparing(ServiceInfo::getServiceId)));
final ConcurrentMap<QueueKey, List<Integer>> newPartitions = new ConcurrentHashMap<>();
partitionSizesMap.forEach((queueKey, size) -> {
for (int i = 0; i < size; i++) {
try {
List<ServiceInfo> servers = queueServicesMap.get(queueKey);
ServiceInfo serviceInfo = servers == null || servers.isEmpty() ? null : servers.get(i % servers.size());
log.info("Server responsible for {}[{}] - {}", queueKey, i, serviceInfo != null ? serviceInfo.getServiceId() : "none");
if (currentService.equals(serviceInfo)) {
newPartitions.computeIfAbsent(queueKey, key -> new ArrayList<>()).add(i);
}
} catch (Exception e) {
log.warn("Failed to resolve server responsible for {}[{}]", queueKey, i, e);
}
}
});
final ConcurrentMap<QueueKey, List<Integer>> oldPartitions = myPartitions;
myPartitions = newPartitions;
log.info("Current Server responsible partitions: {}", myPartitions);
// Diff old vs new ownership to build the change notifications.
Map<QueueKey, Set<TopicPartitionInfo>> changedPartitionsMap = new HashMap<>();
Set<QueueKey> removed = new HashSet<>();
oldPartitions.forEach((queueKey, partitions) -> {
if (!newPartitions.containsKey(queueKey)) {
removed.add(queueKey);
}
});
// Queues this node no longer owns are reported with an empty partition set.
removed.forEach(queueKey -> {
changedPartitionsMap.put(queueKey, Collections.emptySet());
});
myPartitions.forEach((queueKey, partitions) -> {
if (!partitions.equals(oldPartitions.get(queueKey))) {
Set<TopicPartitionInfo> tpiList = partitions.stream()
.map(partition -> buildTopicPartitionInfo(queueKey, partition))
.collect(Collectors.toSet());
changedPartitionsMap.put(queueKey, tpiList);
}
});
if (!changedPartitionsMap.isEmpty()) {
// One event per service type, carrying all of its changed queues.
Map<ServiceType, Map<QueueKey, Set<TopicPartitionInfo>>> partitionsByServiceType = new HashMap<>();
changedPartitionsMap.forEach((queueKey, partitions) -> {
partitionsByServiceType.computeIfAbsent(queueKey.getType(), serviceType -> new HashMap<>())
.put(queueKey, partitions);
});
partitionsByServiceType.forEach(this::publishPartitionChangeEvent);
}
}
/** Logs the change summary and publishes a PartitionChangeEvent for one service type. */
private void publishPartitionChangeEvent(ServiceType serviceType, Map<QueueKey, Set<TopicPartitionInfo>> partitionsMap) {
log.info("Partitions changed: {}", System.lineSeparator() + partitionsMap.entrySet().stream()
.map(entry -> "[" + entry.getKey() + "] - [" + entry.getValue().stream()
.map(tpi -> tpi.getPartition().orElse(-1).toString()).sorted()
.collect(Collectors.joining(", ")) + "]")
.collect(Collectors.joining(System.lineSeparator())));
PartitionChangeEvent event = new PartitionChangeEvent(this, serviceType, partitionsMap);
try {
applicationEventPublisher.publishEvent(event);
} catch (Exception e) {
log.error("Failed to publish partition change event {}", event, e);
}
}
private void logServiceInfo(ServiceInfo server) {
log.info("Found server: {}", server.getServiceId());
}
// Registers a server under every APP queue key it serves. Only APP is mapped here.
private void addNode(ServiceInfo instance, Map<QueueKey, List<ServiceInfo>> queueServiceList) {
for (String serviceTypeStr : instance.getServiceTypesList()) {
ServiceType serviceType = ServiceType.of(serviceTypeStr);
if (ServiceType.APP.equals(serviceType)) {
queueServiceList.computeIfAbsent(new QueueKey(serviceType), key -> new ArrayList<>()).add(instance);
}
}
}
/** Builds the TPI, marking it "mine" when this node currently owns the partition. */
private TopicPartitionInfo buildTopicPartitionInfo(QueueKey queueKey, int partition) {
List<Integer> partitions = myPartitions.get(queueKey);
return buildTopicPartitionInfo(queueKey, partition, partitions != null && partitions.contains(partition));
}
private TopicPartitionInfo buildTopicPartitionInfo(QueueKey queueKey, int partition, boolean myPartition) {
return TopicPartitionInfo.builder()
.topic(partitionTopicsMap.get(queueKey))
.partition(partition)
.myPartition(myPartition)
.build();
}
}

View File

@@ -0,0 +1,22 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.discovery;
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
import sanbing.jcpp.proto.gen.ClusterProto.ServiceInfo;
import java.util.List;
import java.util.UUID;
/**
 * Resolves which topic partition an entity or charging pile maps to, and
 * recalculates partition ownership when cluster membership changes.
 */
public interface PartitionProvider {
/** Resolves the partition for an entity id within the given service queue. */
TopicPartitionInfo resolve(ServiceType serviceType,String queueName, UUID entityId);
/** Resolves the partition for a charging-pile code within the given service queue. */
TopicPartitionInfo resolve(ServiceType serviceType,String queueName, String pileCode);
/** Recomputes the partitions owned by {@code currentService} given all known servers. */
void recalculatePartitions(ServiceInfo currentService, List<ServiceInfo> otherServices);
}

View File

@@ -0,0 +1,33 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.discovery;
import lombok.Data;
import lombok.With;
/**
 * Identifies a logical queue: a service type plus a queue name.
 * Equality/hashing come from Lombok's {@code @Data}; {@code toString} is
 * overridden with a compact "QK(name,type)" form for logs.
 */
@Data
public class QueueKey {

    /** Name used when a queue is addressed only by its service type. */
    public static final String MAIN_QUEUE_NAME = "Main";

    private final ServiceType type;
    @With
    private final String queueName;

    public QueueKey(ServiceType type, String queueName) {
        this.type = type;
        this.queueName = queueName;
    }

    /** Shorthand for the service type's main queue. */
    public QueueKey(ServiceType type) {
        this(type, MAIN_QUEUE_NAME);
    }

    @Override
    public String toString() {
        return "QK(" + queueName + "," + type + ")";
    }
}

View File

@@ -0,0 +1,26 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.discovery;
import sanbing.jcpp.proto.gen.ClusterProto;
/**
 * Provides identity and role information about the current service node.
 *
 * @author baigod
 */
public interface ServiceInfoProvider {
/** Unique id of this node (configured, hostname, or generated). */
String getServiceId();
/** host:port endpoint of this node's web API. */
String getServiceWebapiEndpoint();
/** Configured service type string (e.g. "monolith"). */
String getServiceType();
/** True when this node plays every service role. */
boolean isMonolith();
/** Last built ServiceInfo snapshot. */
ClusterProto.ServiceInfo getServiceInfo();
/** Rebuilds the ServiceInfo with fresh system metrics and returns it. */
ClusterProto.ServiceInfo generateNewServiceInfoWithCurrentSystemInfo();
}

View File

@@ -0,0 +1,23 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.discovery;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
/**
 * Logical roles a JCPP node can play; used for queue keys and service discovery.
 */
@RequiredArgsConstructor
@Getter
public enum ServiceType {
    APP("app"),
    PROTOCOL("protocol");

    // Lower-case label associated with the constant.
    private final String label;

    /**
     * Resolves a type from its case-insensitive name. Uses Locale.ROOT so the
     * mapping is stable regardless of the JVM default locale (e.g. the Turkish
     * dotless-i problem with plain toUpperCase()).
     *
     * @throws IllegalArgumentException if the name matches no constant
     */
    public static ServiceType of(String serviceType) {
        return ServiceType.valueOf(serviceType.toUpperCase(java.util.Locale.ROOT));
    }
}

View File

@@ -0,0 +1,328 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.discovery;
import com.google.protobuf.InvalidProtocolBufferException;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.imps.CuratorFrameworkState;
import org.apache.curator.framework.recipes.cache.ChildData;
import org.apache.curator.framework.recipes.cache.CuratorCache;
import org.apache.curator.framework.recipes.cache.CuratorCacheListener;
import org.apache.curator.framework.state.ConnectionState;
import org.apache.curator.framework.state.ConnectionStateListener;
import org.apache.curator.retry.RetryForever;
import org.apache.curator.utils.CloseableUtils;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.event.ApplicationReadyEvent;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.stereotype.Service;
import org.springframework.util.Assert;
import sanbing.jcpp.infrastructure.queue.discovery.event.OtherServiceShutdownEvent;
import sanbing.jcpp.infrastructure.util.annotation.AfterStartUp;
import sanbing.jcpp.infrastructure.util.async.JCPPThreadFactory;
import sanbing.jcpp.proto.gen.ClusterProto.ServiceInfo;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.concurrent.*;
import java.util.stream.Collectors;
@Service
@ConditionalOnProperty(prefix = "zk", value = "enabled", havingValue = "true")
@Slf4j
public class ZkDiscoveryProvider implements DiscoveryProvider, CuratorCacheListener {

    @Value("${zk.url}")
    private String zkUrl;

    @Value("${zk.retry-interval-ms}")
    private Integer zkRetryInterval;

    @Value("${zk.connection-timeout-ms}")
    private Integer zkConnectionTimeout;

    @Value("${zk.session-timeout-ms}")
    private Integer zkSessionTimeout;

    @Value("${zk.zk-dir}")
    private String zkDir;

    // Grace period (ms) before recalculating partitions after a node disappears,
    // so a node that restarts quickly does not cause two rebalances.
    @Value("${zk.recalculate-delay:0}")
    private Long recalculateDelay;

    // Pending delayed recalculation tasks, keyed by the serviceId of the node that disappeared.
    protected final ConcurrentHashMap<String, ScheduledFuture<?>> delayedTasks;

    private final ApplicationEventPublisher applicationEventPublisher;
    private final ServiceInfoProvider serviceInfoProvider;
    private final PartitionProvider partitionProvider;

    // Single-threaded executor that serializes all background ZK work (publish, reconnect, events).
    private ScheduledExecutorService zkExecutorService;
    private CuratorFramework client;
    private CuratorCache cache;
    // Path of this instance's ephemeral-sequential node; null until first published.
    private String nodePath;
    private String zkNodesDir;
    private volatile boolean stopped = true;

    public ZkDiscoveryProvider(ApplicationEventPublisher applicationEventPublisher,
                               ServiceInfoProvider serviceInfoProvider,
                               PartitionProvider partitionProvider) {
        this.applicationEventPublisher = applicationEventPublisher;
        this.serviceInfoProvider = serviceInfoProvider;
        this.partitionProvider = partitionProvider;
        delayedTasks = new ConcurrentHashMap<>();
    }

    /**
     * Validates mandatory ZK settings, then opens the Curator client and the node cache.
     */
    @PostConstruct
    public void init() {
        log.info("Discovery Provider Initializing...");
        Assert.hasLength(zkUrl, missingProperty("zk.url"));
        Assert.notNull(zkRetryInterval, missingProperty("zk.retry-interval-ms"));
        Assert.notNull(zkConnectionTimeout, missingProperty("zk.connection-timeout-ms"));
        // Fixed: the property name is "zk.session-timeout-ms" (message previously said "zk-session-timeout-ms").
        Assert.notNull(zkSessionTimeout, missingProperty("zk.session-timeout-ms"));
        zkExecutorService = Executors.newSingleThreadScheduledExecutor(JCPPThreadFactory.forName("zk-discovery"));
        zkNodesDir = zkDir + "/nodes";
        initZkClient();
        log.info("Initialization completed, using ZK connect string: {}", zkUrl);
    }

    /**
     * Returns the {@link ServiceInfo} of every other live node, decoded from the cached
     * children of the nodes directory (this instance's node and the directory itself excluded).
     */
    @Override
    public List<ServiceInfo> getOtherServers() {
        return cache.stream()
                .filter(cd -> !cd.getPath().equals(nodePath) && !cd.getPath().equals(zkNodesDir))
                .map(cd -> {
                    try {
                        return ServiceInfo.parseFrom(cd.getData());
                    } catch (NoSuchElementException | InvalidProtocolBufferException e) {
                        log.error("Failed to decode ZK node", e);
                        throw new RuntimeException(e);
                    }
                })
                .collect(Collectors.toList());
    }

    /**
     * Publishes this node to ZK once the application is fully started, triggers the
     * first partition calculation, and schedules periodic re-publication (heartbeat).
     */
    @AfterStartUp(order = AfterStartUp.DISCOVERY_SERVICE)
    public void onApplicationEvent(ApplicationReadyEvent event) {
        if (stopped) {
            log.debug("Ignoring application ready event. Service is stopped.");
            return;
        } else {
            log.info("Received application ready event. Starting current ZK node.");
        }
        if (client.getState() != CuratorFrameworkState.STARTED) {
            log.debug("Ignoring application ready event, ZK client is not started, ZK client state [{}]", client.getState());
            return;
        }
        log.info("Going to publish current server...");
        publishCurrentServer();
        log.info("Going to recalculate partitions...");
        recalculatePartitions();
        // Refresh this node's data every minute so peers see up-to-date system info.
        zkExecutorService.scheduleAtFixedRate(this::publishCurrentServer, 1, 1, TimeUnit.MINUTES);
    }

    /**
     * Creates this node's ephemeral-sequential ZK entry, or refreshes its payload
     * if the entry already exists. Synchronized: may be called by the heartbeat
     * task, the reconnect path and the deletion-event handler concurrently.
     */
    @SneakyThrows
    public synchronized void publishCurrentServer() {
        ServiceInfo self = serviceInfoProvider.getServiceInfo();
        if (currentServerExists()) {
            log.trace("[{}] Updating ZK node for current instance: {}", self.getServiceId(), nodePath);
            client.setData().forPath(nodePath, serviceInfoProvider.generateNewServiceInfoWithCurrentSystemInfo().toByteArray());
        } else {
            try {
                log.info("[{}] Creating ZK node for current instance", self.getServiceId());
                nodePath = client.create()
                        .creatingParentsIfNeeded()
                        .withMode(CreateMode.EPHEMERAL_SEQUENTIAL)
                        .forPath(zkNodesDir + "/node-", self.toByteArray());
                log.info("[{}] Created ZK node for current instance: {}", self.getServiceId(), nodePath);
                // Re-register automatically if the ZK session is ever lost.
                client.getConnectionStateListenable().addListener(checkReconnect(self));
            } catch (Exception e) {
                log.error("Failed to create ZK node", e);
                throw new RuntimeException(e);
            }
        }
    }

    // Returns true only when our node exists in ZK AND still carries this instance's ServiceInfo.
    private boolean currentServerExists() {
        if (nodePath == null) {
            return false;
        }
        try {
            ServiceInfo self = serviceInfoProvider.getServiceInfo();
            ServiceInfo registeredServerInfo = ServiceInfo.parseFrom(client.getData().forPath(nodePath));
            if (self.equals(registeredServerInfo)) {
                return true;
            }
        } catch (KeeperException.NoNodeException e) {
            log.info("ZK node does not exist: {}", nodePath);
        } catch (Exception e) {
            log.error("Couldn't check if ZK node exists", e);
        }
        return false;
    }

    // Connection-state listener that triggers a full reconnect when the ZK session is LOST.
    private ConnectionStateListener checkReconnect(ServiceInfo self) {
        return (client, newState) -> {
            log.info("[{}] ZK state changed: {}", self.getServiceId(), newState);
            if (newState == ConnectionState.LOST) {
                zkExecutorService.submit(this::reconnect);
            }
        };
    }

    // Guards against overlapping reconnect attempts queued on zkExecutorService.
    private volatile boolean reconnectInProgress = false;

    // Tears down and rebuilds the Curator client, then re-publishes this node.
    private synchronized void reconnect() {
        if (!reconnectInProgress) {
            reconnectInProgress = true;
            try {
                destroyZkClient();
                initZkClient();
                publishCurrentServer();
            } catch (Exception e) {
                log.error("Failed to reconnect to ZK: {}", e.getMessage(), e);
            } finally {
                reconnectInProgress = false;
            }
        }
    }

    // Opens the Curator client (blocking until connected) and starts the node cache
    // with this class registered as its listener. Cleans up on failure.
    private void initZkClient() {
        try {
            client = CuratorFrameworkFactory.newClient(zkUrl, zkSessionTimeout, zkConnectionTimeout, new RetryForever(zkRetryInterval));
            client.start();
            client.blockUntilConnected();
            cache = CuratorCache.builder(client, zkNodesDir).build();
            cache.listenable().addListener(this);
            cache.start();
            stopped = false;
            log.info("ZK client connected");
        } catch (Exception e) {
            log.error("Failed to connect to ZK: {}", e.getMessage(), e);
            CloseableUtils.closeQuietly(cache);
            CloseableUtils.closeQuietly(client);
            throw new RuntimeException(e);
        }
    }

    // Removes this node's ephemeral entry (best-effort; rethrows on failure).
    private void unpublishCurrentServer() {
        try {
            if (nodePath != null) {
                client.delete().forPath(nodePath);
            }
        } catch (Exception e) {
            log.error("Failed to delete ZK node {}", nodePath, e);
            throw new RuntimeException(e);
        }
    }

    // Unpublishes this node and closes cache + client; marks the provider stopped first
    // so in-flight cache events are ignored.
    private void destroyZkClient() {
        stopped = true;
        try {
            unpublishCurrentServer();
        } catch (Exception ignored) {
            // best-effort: the ephemeral node disappears with the session anyway
        }
        CloseableUtils.closeQuietly(cache);
        CloseableUtils.closeQuietly(client);
        log.info("ZK client disconnected");
    }

    /** Shuts down the ZK client and the background executor on application stop. */
    @PreDestroy
    public void destroy() {
        destroyZkClient();
        zkExecutorService.shutdownNow();
        log.info("Stopped zk discovery service");
    }

    /** Builds the standard "property must be set" assertion message. */
    public static String missingProperty(String propertyName) {
        return "The " + propertyName + " property need to be set!";
    }

    /**
     * CuratorCache callback: reacts to other nodes joining/leaving the cluster.
     * NODE_CREATED cancels a pending delayed rebalance (node restarted in time) or
     * triggers one; NODE_DELETED publishes a shutdown event and schedules a delayed
     * rebalance so a quick restart can cancel it.
     */
    @Override
    public void event(Type type, ChildData oldData, ChildData data) {
        if (stopped) {
            log.info("Ignoring {}. Service is stopped.", type);
            return;
        }
        if (client.getState() != CuratorFrameworkState.STARTED) {
            log.info("Ignoring {}, ZK client is not started, ZK client state [{}]", type, client.getState());
            return;
        }
        switch (type) {
            case NODE_CREATED -> {
                if (data == null || data.getData() == null) {
                    log.info("Ignoring {} due to empty created data", type);
                    return;
                }
                String serviceId = getServiceId(type, data);
                ScheduledFuture<?> task = delayedTasks.remove(serviceId);
                if (task != null) {
                    if (task.cancel(false)) {
                        log.info("[{}] Recalculate partitions ignored. Service was restarted in time.", serviceId);
                    } else {
                        log.info("[{}] Going to recalculate partitions. Service was not restarted in time!", serviceId);
                        recalculatePartitions();
                    }
                } else {
                    log.info("[{}] Going to recalculate partitions due to adding new node.",
                            serviceId);
                    recalculatePartitions();
                }
            }
            case NODE_DELETED -> {
                if (oldData == null || oldData.getData() == null) {
                    log.info("Ignoring {} due to empty delete data", type);
                    return;
                } else if (nodePath != null && nodePath.equals(oldData.getPath())) {
                    // Our own node vanished (e.g. session hiccup) — re-create it instead of rebalancing.
                    log.info("ZK node for current instance is somehow deleted.");
                    publishCurrentServer();
                    return;
                }
                String serviceId = getServiceId(type, oldData);
                zkExecutorService.submit(() -> applicationEventPublisher.publishEvent(new OtherServiceShutdownEvent(this, serviceId)));
                ScheduledFuture<?> future = zkExecutorService.schedule(() -> {
                    log.info("[{}] Going to recalculate partitions due to removed node", serviceId);
                    ScheduledFuture<?> removedTask = delayedTasks.remove(serviceId);
                    if (removedTask != null) {
                        recalculatePartitions();
                    }
                }, recalculateDelay, TimeUnit.MILLISECONDS);
                delayedTasks.put(serviceId, future);
            }
            default -> {
                // other cache event types (e.g. initialization) are irrelevant here
            }
        }
    }

    // Decodes the ServiceInfo payload of a cache node and returns its serviceId.
    private static String getServiceId(Type type, ChildData data) {
        ServiceInfo instance;
        try {
            instance = ServiceInfo.parseFrom(data.getData());
        } catch (InvalidProtocolBufferException e) {
            log.error("Failed to decode server instance for node {}", data.getPath(), e);
            throw new RuntimeException(e);
        }
        String serviceId = instance.getServiceId();
        log.info("Processing [{}] event for [{}]", type, serviceId);
        return serviceId;
    }

    // Cancels every pending delayed rebalance and recalculates partitions against the
    // current set of live peers.
    synchronized void recalculatePartitions() {
        delayedTasks.values().forEach(future -> future.cancel(false));
        delayedTasks.clear();
        partitionProvider.recalculatePartitions(serviceInfoProvider.getServiceInfo(), getOtherServers());
    }
}

View File

@@ -0,0 +1,26 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.discovery.event;
import lombok.Getter;
import lombok.ToString;
import org.springframework.context.ApplicationEvent;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Base class for cluster application events. Every event receives a process-wide,
 * monotonically increasing sequence number so listeners can detect and discard
 * stale or out-of-order deliveries.
 */
@ToString
public class JCPPApplicationEvent extends ApplicationEvent {

    /** Process-wide counter shared by all event instances; the first event gets number 1. */
    private static final AtomicInteger SEQUENCE_GENERATOR = new AtomicInteger();

    /** Position of this event in global publication order. */
    @Getter
    private final int sequenceNumber;

    public JCPPApplicationEvent(Object source) {
        super(source);
        this.sequenceNumber = SEQUENCE_GENERATOR.incrementAndGet();
    }
}

View File

@@ -0,0 +1,54 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.discovery.event;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.ApplicationListener;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
/**
 * Base listener for {@link JCPPApplicationEvent}s that enforces ordering: only events
 * whose sequence number is strictly greater than the last processed one are forwarded
 * to {@link #onJCPPApplicationEvent(JCPPApplicationEvent)}; older or duplicate events
 * are logged and dropped.
 *
 * @param <T> concrete event type handled by the subclass
 */
public abstract class JCPPApplicationEventListener<T extends JCPPApplicationEvent> implements ApplicationListener<T> {

    // Highest sequence number handled so far; guarded by seqNumberLock.
    private int lastProcessedSequenceNumber = Integer.MIN_VALUE;
    private final Lock seqNumberLock = new ReentrantLock();
    private final Logger log = LoggerFactory.getLogger(getClass());

    @Override
    public void onApplicationEvent(T event) {
        if (!filterApplicationEvent(event)) {
            log.trace("Skipping event due to filter: {}", event);
            return;
        }
        boolean validUpdate = false;
        // Check-and-advance must be atomic so two threads cannot both accept the same event.
        seqNumberLock.lock();
        try {
            if (event.getSequenceNumber() > lastProcessedSequenceNumber) {
                validUpdate = true;
                lastProcessedSequenceNumber = event.getSequenceNumber();
            }
        } finally {
            seqNumberLock.unlock();
        }
        if (validUpdate) {
            try {
                onJCPPApplicationEvent(event);
            } catch (Exception e) {
                log.error("Failed to handle partition change event: {}", event, e);
            }
        } else {
            // Fixed message: the event is dropped when lastProcessed >= event sequence
            // (the previous ">" wording was wrong for the equal-sequence case).
            log.info("Application event ignored due to invalid sequence number ({} >= {}). Event: {}", lastProcessedSequenceNumber, event.getSequenceNumber(), event);
        }
    }

    /** Handles an event that passed both the filter and the sequence-number check. */
    protected abstract void onJCPPApplicationEvent(T event);

    /**
     * Hook for subclasses to drop irrelevant events before the sequence check.
     * Default implementation accepts everything.
     */
    protected boolean filterApplicationEvent(T event) {
        return true;
    }
}

View File

@@ -0,0 +1,18 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.discovery.event;
import lombok.Getter;
/**
 * Event published when another service instance's ZooKeeper node is removed,
 * i.e. that instance shut down or lost its session.
 */
public class OtherServiceShutdownEvent extends JCPPApplicationEvent {

    // Identifier of the service instance that went offline.
    @Getter
    private final String serviceId;

    public OtherServiceShutdownEvent(Object source, String serviceId) {
        super(source);
        this.serviceId = serviceId;
    }
}

View File

@@ -0,0 +1,45 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.discovery.event;
import lombok.Getter;
import lombok.ToString;
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
import sanbing.jcpp.infrastructure.queue.discovery.QueueKey;
import sanbing.jcpp.infrastructure.queue.discovery.ServiceType;
import java.io.Serial;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import static sanbing.jcpp.infrastructure.queue.discovery.QueueKey.MAIN_QUEUE_NAME;
/**
 * Event published after cluster partitions have been recalculated; carries the
 * full new assignment of topic partitions per queue key.
 */
@ToString(callSuper = true)
public class PartitionChangeEvent extends JCPPApplicationEvent {

    @Serial
    private static final long serialVersionUID = -8731788167026510559L;

    // New partition assignment: queue key -> partitions now owned by this node.
    @Getter
    private final Map<QueueKey, Set<TopicPartitionInfo>> partitionsMap;

    // NOTE(review): the serviceType parameter is never used — confirm whether it
    // should filter partitionsMap or can be removed from the callers' side.
    public PartitionChangeEvent(Object source, ServiceType serviceType, Map<QueueKey, Set<TopicPartitionInfo>> partitionsMap) {
        super(source);
        this.partitionsMap = partitionsMap;
    }

    /** Convenience accessor: partitions of the APP service type's main queue. */
    public Set<TopicPartitionInfo> getAppPartitions() {
        return getPartitionsByServiceTypeAndQueueName(ServiceType.APP, MAIN_QUEUE_NAME);
    }

    // Selects the entries matching the given service type and queue name and
    // flattens their partition sets into one.
    private Set<TopicPartitionInfo> getPartitionsByServiceTypeAndQueueName(ServiceType serviceType, String queueName) {
        return partitionsMap.entrySet()
                .stream()
                .filter(entry -> serviceType.equals(entry.getKey().getType()) && queueName.equals(entry.getKey().getQueueName()))
                .flatMap(entry -> entry.getValue().stream())
                .collect(Collectors.toSet());
    }
}

View File

@@ -0,0 +1,94 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.kafka;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.errors.TopicExistsException;
import sanbing.jcpp.infrastructure.queue.QueueAdmin;
import sanbing.jcpp.infrastructure.util.property.PropertyUtils;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
/**
 * Kafka implementation of {@link QueueAdmin}: creates topics on demand using a
 * lazily loaded, locally cached set of known topic names.
 */
@Slf4j
public class KafkaAdmin implements QueueAdmin {

    private final KafkaSettings settings;
    // Topic-level configs passed to Kafka when creating a topic (partition count removed).
    private final Map<String, String> topicConfigs;
    // Number of partitions for newly created topics; taken from topicConfigs, defaults to 1.
    private final int numPartitions;
    // Lazily initialized cache of topic names known to exist (see getTopics()).
    private volatile Set<String> topics;
    private final short replicationFactor;

    public KafkaAdmin(KafkaSettings settings, Map<String, String> topicConfigs) {
        this.settings = settings;
        this.topicConfigs = topicConfigs;
        String numPartitionsStr = topicConfigs.get(KafkaTopicConfigs.NUM_PARTITIONS_SETTING);
        if (numPartitionsStr != null) {
            numPartitions = Integer.parseInt(numPartitionsStr);
            // Remove by the same key that was read above so the partition count is not
            // forwarded to Kafka as a regular topic config (was a hard-coded "partitions" literal).
            topicConfigs.remove(KafkaTopicConfigs.NUM_PARTITIONS_SETTING);
        } else {
            numPartitions = 1;
        }
        replicationFactor = settings.getReplicationFactor();
    }

    /**
     * Creates the topic if it is not already known, applying the configured partition
     * count, replication factor and any extra per-call properties. A concurrent
     * "topic already exists" response from the broker is treated as success.
     */
    @Override
    public void createTopicIfNotExists(String topic, String properties) {
        Set<String> knownTopics = getTopics();   // renamed: previously shadowed the 'topics' field
        if (knownTopics.contains(topic)) {
            return;
        }
        try {
            NewTopic newTopic = new NewTopic(topic, numPartitions, replicationFactor).configs(PropertyUtils.getProps(topicConfigs, properties));
            createTopic(newTopic).values().get(topic).get();
            knownTopics.add(topic);
        } catch (ExecutionException ee) {
            switch (ee.getCause()) {
                case TopicExistsException ignored -> {
                    // someone else created it first — that is fine
                }
                case null, default -> {
                    log.warn("[{}] Failed to create topic", topic, ee);
                    throw new RuntimeException(ee);
                }
            }
        } catch (Exception e) {
            log.warn("[{}] Failed to create topic", topic, e);
            throw new RuntimeException(e);
        }
    }

    // Double-checked lazy load of the broker's current topic list; a listing failure
    // is logged and leaves the cache empty (topics will then be created on demand).
    private Set<String> getTopics() {
        if (topics == null) {
            synchronized (this) {
                if (topics == null) {
                    topics = ConcurrentHashMap.newKeySet();
                    try {
                        topics.addAll(settings.getAdminClient().listTopics().names().get());
                    } catch (InterruptedException | ExecutionException e) {
                        log.error("Failed to get all topics.", e);
                    }
                }
            }
        }
        return topics;
    }

    /** Issues the actual AdminClient createTopics call for a single topic. */
    public CreateTopicsResult createTopic(NewTopic topic) {
        return settings.getAdminClient().createTopics(Collections.singletonList(topic));
    }

    @Override
    public void destroy() {
        // AdminClient lifecycle is owned by KafkaSettings; nothing to release here.
    }
}

View File

@@ -0,0 +1,29 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.kafka;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Component;
/**
 * Configuration for the periodic Kafka consumer-lag reporting
 * (see KafkaConsumerStatsService). Active only when queue.type=kafka.
 */
@Component
@ConditionalOnProperty(prefix = "queue", value = "type", havingValue = "kafka")
@Getter
@AllArgsConstructor
@NoArgsConstructor
public class KafkaConsumerStatisticConfig {

    // Whether lag reporting is enabled at all.
    @Value("${queue.kafka.consumer-stats.enabled:true}")
    private Boolean enabled;

    // Interval between lag reports, in milliseconds.
    @Value("${queue.kafka.consumer-stats.print-interval-ms:60000}")
    private Long printIntervalMs;

    // Timeout for the Kafka offset/admin requests issued while collecting stats, in milliseconds.
    @Value("${queue.kafka.consumer-stats.kafka-response-timeout-ms:1000}")
    private Long kafkaResponseTimeoutMs;
}

View File

@@ -0,0 +1,158 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.kafka;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import lombok.Builder;
import lombok.Data;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Component;
import sanbing.jcpp.infrastructure.util.async.JCPPThreadFactory;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
/**
 * Periodically logs, per registered consumer group, every topic partition whose
 * committed offset lags behind the end offset. Purely observational: it never
 * consumes messages or commits offsets.
 */
@Slf4j
@Component
@RequiredArgsConstructor
@ConditionalOnProperty(prefix = "queue", value = "type", havingValue = "kafka")
public class KafkaConsumerStatsService {

    // Consumer group ids currently monitored for lag.
    private final Set<String> monitoredGroups = ConcurrentHashMap.newKeySet();

    private final KafkaSettings kafkaSettings;
    private final KafkaConsumerStatisticConfig statsConfig;

    // Dedicated consumer used only to resolve end offsets; it never subscribes to topics.
    private Consumer<String, byte[]> consumer;
    private ScheduledExecutorService statsPrintScheduler;

    /**
     * Creates the offsets consumer and starts the periodic reporting task.
     * No-op when consumer stats are disabled.
     */
    @PostConstruct
    public void init() {
        if (!statsConfig.getEnabled()) {
            return;
        }
        this.statsPrintScheduler = Executors.newSingleThreadScheduledExecutor(JCPPThreadFactory.forName("kafka-consumer-stats", Thread.MAX_PRIORITY));
        Properties consumerProps = kafkaSettings.toConsumerProps(null);
        consumerProps.put(ConsumerConfig.CLIENT_ID_CONFIG, "consumer-stats-loader-client");
        consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer-stats-loader-client-group");
        this.consumer = new KafkaConsumer<>(consumerProps);
        startLogScheduling();
    }

    // Schedules the recurring task that gathers committed vs. end offsets for each
    // monitored group and logs the partitions with non-zero lag.
    private void startLogScheduling() {
        Duration timeoutDuration = Duration.ofMillis(statsConfig.getKafkaResponseTimeoutMs());
        statsPrintScheduler.scheduleWithFixedDelay(() -> {
            if (!isStatsPrintRequired()) {
                return;
            }
            for (String groupId : monitoredGroups) {
                try {
                    // Committed offsets per partition for this group (admin API), bounded by the configured timeout.
                    Map<TopicPartition, OffsetAndMetadata> groupOffsets = kafkaSettings.getAdminClient().listConsumerGroupOffsets(groupId).partitionsToOffsetAndMetadata()
                            .get(statsConfig.getKafkaResponseTimeoutMs(), TimeUnit.MILLISECONDS);
                    // Latest (end) offsets for the same partitions, via the dedicated consumer.
                    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(groupOffsets.keySet(), timeoutDuration);
                    List<GroupTopicStats> lagTopicsStats = getTopicsStatsWithLag(groupOffsets, endOffsets);
                    if (!lagTopicsStats.isEmpty()) {
                        StringBuilder builder = new StringBuilder();
                        for (int i = 0; i < lagTopicsStats.size(); i++) {
                            builder.append(lagTopicsStats.get(i).toString());
                            if (i != lagTopicsStats.size() - 1) {
                                builder.append(", ");
                            }
                        }
                        log.info("[{}] Topic partitions with lag: [{}].", groupId, builder);
                    }
                } catch (Exception e) {
                    // Best-effort monitoring: log and keep the scheduler alive for the next round.
                    log.warn("[{}] Failed to get consumer group stats. Reason - {}.", groupId, e.getMessage());
                    log.trace("Detailed error: ", e);
                }
            }
        }, statsConfig.getPrintIntervalMs(), statsConfig.getPrintIntervalMs(), TimeUnit.MILLISECONDS);
    }

    // Reports go to INFO; skip the Kafka round-trips entirely when INFO is disabled.
    private boolean isStatsPrintRequired() {
        return log.isInfoEnabled() ;
    }

    // Builds one stats entry per partition whose end offset differs from the committed offset.
    private List<GroupTopicStats> getTopicsStatsWithLag(Map<TopicPartition, OffsetAndMetadata> groupOffsets, Map<TopicPartition, Long> endOffsets) {
        List<GroupTopicStats> consumerGroupStats = new ArrayList<>();
        for (TopicPartition topicPartition : groupOffsets.keySet()) {
            long endOffset = endOffsets.get(topicPartition);
            long committedOffset = groupOffsets.get(topicPartition).offset();
            long lag = endOffset - committedOffset;
            if (lag != 0) {
                GroupTopicStats groupTopicStats = GroupTopicStats.builder()
                        .topic(topicPartition.topic())
                        .partition(topicPartition.partition())
                        .committedOffset(committedOffset)
                        .endOffset(endOffset)
                        .lag(lag)
                        .build();
                consumerGroupStats.add(groupTopicStats);
            }
        }
        return consumerGroupStats;
    }

    /** Starts lag monitoring for the given group (no-op when stats disabled or groupId blank). */
    public void registerClientGroup(String groupId) {
        if (statsConfig.getEnabled() && !StringUtils.isEmpty(groupId)) {
            monitoredGroups.add(groupId);
        }
    }

    /** Stops lag monitoring for the given group. */
    public void unregisterClientGroup(String groupId) {
        if (statsConfig.getEnabled() && !StringUtils.isEmpty(groupId)) {
            monitoredGroups.remove(groupId);
        }
    }

    /** Stops the reporting task and closes the offsets consumer. */
    @PreDestroy
    public void destroy() {
        if (statsPrintScheduler != null) {
            statsPrintScheduler.shutdownNow();
        }
        if (consumer != null) {
            consumer.close();
        }
    }

    // Immutable snapshot of one partition's lag, used only for log formatting.
    @Builder
    @Data
    private static class GroupTopicStats {
        private String topic;
        private int partition;
        private long committedOffset;
        private long endOffset;
        private long lag;

        @Override
        public String toString() {
            return "[" +
                    "topic=[" + topic + ']' +
                    ", partition=[" + partition + "]" +
                    ", committedOffset=[" + committedOffset + "]" +
                    ", endOffset=[" + endOffset + "]" +
                    ", lag=[" + lag + "]" +
                    "]";
        }
    }
}

View File

@@ -0,0 +1,117 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.kafka;
import lombok.Builder;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.springframework.util.StopWatch;
import sanbing.jcpp.infrastructure.queue.AbstractQueueConsumerTemplate;
import sanbing.jcpp.infrastructure.queue.KafkaQueueMsg;
import sanbing.jcpp.infrastructure.queue.QueueAdmin;
import sanbing.jcpp.infrastructure.queue.QueueMsg;
import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
/**
 * Kafka-backed queue consumer. Wraps a {@link KafkaConsumer}, creating topics on
 * subscribe, decoding raw records via a {@link KafkaDecoder}, and optionally
 * registering its consumer group with the lag-stats service.
 *
 * @param <T> decoded message type
 */
@Slf4j
public class KafkaConsumerTemplate<T extends QueueMsg> extends AbstractQueueConsumerTemplate<ConsumerRecord<String, byte[]>, T> {

    private final QueueAdmin admin;
    private final KafkaConsumer<String, byte[]> consumer;
    private final KafkaDecoder<T> decoder;
    private final KafkaConsumerStatsService statsService;
    private final String groupId;

    @Builder
    private KafkaConsumerTemplate(KafkaSettings settings, KafkaDecoder<T> decoder,
                                  String clientId, String groupId, String topic,
                                  QueueAdmin admin, KafkaConsumerStatsService statsService) {
        super(topic);
        Properties consumerProps = settings.toConsumerProps(topic);
        consumerProps.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId);
        // group.id is optional for assign-style usage; only set it when provided.
        if (groupId != null) {
            consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        }
        this.groupId = groupId;
        this.statsService = statsService;
        if (statsService != null) {
            statsService.registerClientGroup(groupId);
        }
        this.admin = admin;
        this.decoder = decoder;
        this.consumer = new KafkaConsumer<>(consumerProps);
    }

    /**
     * Subscribes to the given topics, creating each one first; an empty list
     * means "unsubscribe from everything".
     */
    @Override
    protected void doSubscribe(List<String> topicNames) {
        if (topicNames.isEmpty()) {
            log.info("unsubscribe due to empty topic list");
            consumer.unsubscribe();
            return;
        }
        for (String topicName : topicNames) {
            admin.createTopicIfNotExists(topicName);
        }
        consumer.subscribe(topicNames);
    }

    /** Polls Kafka for up to the given duration and returns the fetched records. */
    @Override
    protected List<ConsumerRecord<String, byte[]>> doPoll(long durationInMillis) {
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();
        log.trace("poll topic {} maxDuration {}", getTopic(), durationInMillis);
        ConsumerRecords<String, byte[]> polled = consumer.poll(Duration.ofMillis(durationInMillis));
        stopWatch.stop();
        log.trace("poll topic {} took {}ms", getTopic(), stopWatch.getTotalTimeMillis());
        if (polled.isEmpty()) {
            return Collections.emptyList();
        }
        List<ConsumerRecord<String, byte[]>> fetched = new ArrayList<>(256);
        for (ConsumerRecord<String, byte[]> consumerRecord : polled) {
            fetched.add(consumerRecord);
        }
        return fetched;
    }

    /** Decodes a raw Kafka record into the typed queue message. */
    @Override
    public T decode(ConsumerRecord<String, byte[]> record) throws IOException {
        return decoder.decode(new KafkaQueueMsg(record));
    }

    /** Synchronously commits the offsets of the records returned by the last poll. */
    @Override
    protected void doCommit() {
        consumer.commitSync();
    }

    /** Unsubscribes, closes the consumer and deregisters the group from lag stats. */
    @Override
    protected void doUnsubscribe() {
        if (consumer != null) {
            consumer.unsubscribe();
            consumer.close();
        }
        if (statsService != null) {
            statsService.unregisterClientGroup(groupId);
        }
    }

    /** Kafka's poll blocks up to the requested duration, so long polling is native. */
    @Override
    public boolean isLongPollingSupported() {
        return true;
    }
}

View File

@@ -0,0 +1,16 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.kafka;
import sanbing.jcpp.infrastructure.queue.QueueMsg;
import java.io.IOException;
/**
 * Decodes a raw queue message payload into its typed representation.
 *
 * @param <T> target message type
 */
public interface KafkaDecoder<T> {

    /**
     * Decodes the given queue message.
     *
     * @throws IOException if the payload cannot be parsed
     */
    T decode(QueueMsg msg) throws IOException;
}

View File

@@ -0,0 +1,133 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.kafka;
import lombok.Builder;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import sanbing.jcpp.infrastructure.queue.QueueAdmin;
import sanbing.jcpp.infrastructure.queue.QueueCallback;
import sanbing.jcpp.infrastructure.queue.QueueMsg;
import sanbing.jcpp.infrastructure.queue.QueueProducer;
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Objects;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
/**
 * Kafka-backed queue producer. Creates topics on first send, forwards message
 * headers, and optionally attaches diagnostic headers (producer id, thread name,
 * call stack) at debug/trace log levels.
 *
 * @param <T> queue message type
 */
@Slf4j
public class KafkaProducerTemplate<T extends QueueMsg> implements QueueProducer<T> {

    private final KafkaProducer<String, byte[]> producer;
    @Getter
    private final String topic;
    @Getter
    private final KafkaSettings settings;
    private final QueueAdmin admin;
    // Topic partitions already created (or confirmed to exist) by this producer.
    private final Set<TopicPartitionInfo> topics;
    @Getter
    private final String clientId;

    @Builder
    private KafkaProducerTemplate(KafkaSettings settings, String topic, String clientId, QueueAdmin admin) {
        Properties props = settings.toProducerProps(topic);
        // clientId is mandatory — fail fast instead of creating an anonymous producer.
        this.clientId = Objects.requireNonNull(clientId, "Kafka producer client.id is null");
        // requireNonNull already guarantees non-null, so a plain isEmpty() check suffices
        // (the previous StringUtils.isEmpty null-check was redundant/contradictory).
        if (!clientId.isEmpty()) {
            props.put(ProducerConfig.CLIENT_ID_CONFIG, clientId);
        }
        this.settings = settings;
        this.producer = new KafkaProducer<>(props);
        this.topic = topic;
        this.admin = admin;
        topics = ConcurrentHashMap.newKeySet();
    }

    @Override
    public void init() {
        // KafkaProducer is fully initialized in the constructor; nothing to do here.
    }

    // Adds diagnostic headers: producer id and thread name always, plus (at TRACE)
    // up to ~18 frames of the caller's stack for debugging message provenance.
    void addAnalyticHeaders(List<Header> headers) {
        headers.add(new RecordHeader("_producerId", getClientId().getBytes(StandardCharsets.UTF_8)));
        headers.add(new RecordHeader("_threadName", Thread.currentThread().getName().getBytes(StandardCharsets.UTF_8)));
        if (log.isTraceEnabled()) {
            try {
                StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
                int maxLevel = Math.min(stackTrace.length, 20);
                for (int i = 2; i < maxLevel; i++) { // ignore two levels: getStackTrace and addAnalyticHeaders
                    headers.add(new RecordHeader("_stackTrace" + i, stackTrace[i].toString().getBytes(StandardCharsets.UTF_8)));
                }
            } catch (Throwable t) {
                log.trace("Failed to add stacktrace headers in Kafka producer {}", getClientId(), t);
            }
        }
    }

    /**
     * Sends a message to the given topic partition, creating the topic if needed.
     * The callback (when provided) receives the Kafka metadata on success or the
     * failure cause; synchronous failures are also rethrown to the caller.
     */
    @Override
    public void send(TopicPartitionInfo tpi, T msg, QueueCallback callback) {
        try {
            createTopicIfNotExist(tpi);
            String key = msg.getKey();
            byte[] data = msg.getData();
            ProducerRecord<String, byte[]> record;
            List<Header> headers = msg.getHeaders().getData().entrySet().stream().map(e -> new RecordHeader(e.getKey(), e.getValue())).collect(Collectors.toList());
            if (log.isDebugEnabled()) {
                addAnalyticHeaders(headers);
            }
            record = new ProducerRecord<>(tpi.getFullTopicName(), null, key, data, headers);
            producer.send(record, (metadata, exception) -> {
                if (exception == null) {
                    if (callback != null) {
                        callback.onSuccess(new KafkaQueueMsgMetadata(metadata));
                    }
                } else {
                    if (callback != null) {
                        callback.onFailure(exception);
                    } else {
                        log.warn("Producer template failure: {}", exception.getMessage(), exception);
                    }
                }
            });
        } catch (Exception e) {
            if (callback != null) {
                callback.onFailure(e);
            } else {
                log.warn("Producer template failure (send method wrapper): {}", e.getMessage(), e);
            }
            throw e;
        }
    }

    // Ensures the topic exists exactly once per TopicPartitionInfo, caching successes locally.
    private void createTopicIfNotExist(TopicPartitionInfo tpi) {
        if (topics.contains(tpi)) {
            return;
        }
        admin.createTopicIfNotExists(tpi.getFullTopicName());
        topics.add(tpi);
    }

    /** Flushes pending records and closes the underlying Kafka producer. */
    @Override
    public void stop() {
        if (producer != null) {
            producer.close();
        }
    }
}

View File

@@ -0,0 +1,17 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.kafka;
import lombok.AllArgsConstructor;
import lombok.Data;
import org.apache.kafka.clients.producer.RecordMetadata;
import sanbing.jcpp.infrastructure.queue.QueueMsgMetadata;
/**
 * Queue-message metadata backed by the Kafka {@link RecordMetadata} returned
 * from a successful producer send (topic, partition, offset, timestamp).
 */
@Data
@AllArgsConstructor
public class KafkaQueueMsgMetadata implements QueueMsgMetadata {

    private RecordMetadata metadata;
}

View File

@@ -0,0 +1,210 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.kafka;
import jakarta.annotation.PreDestroy;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.config.SslConfigs;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;
import sanbing.jcpp.infrastructure.util.property.JCPPProperty;
import sanbing.jcpp.infrastructure.util.property.PropertyUtils;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
@Slf4j
@ConditionalOnProperty(prefix = "queue", value = "type", havingValue = "kafka")
@ConfigurationProperties(prefix = "queue.kafka")
@Component
public class KafkaSettings {
@Value("${queue.kafka.bootstrap-servers}")
private String servers;
@Value("${queue.kafka.ssl.enabled:false}")
private boolean sslEnabled;
@Value("${queue.kafka.ssl.truststore-location:}")
private String sslTruststoreLocation;
@Value("${queue.kafka.ssl.truststore-password:}")
private String sslTruststorePassword;
@Value("${queue.kafka.ssl.keystore-location:}")
private String sslKeystoreLocation;
@Value("${queue.kafka.ssl.keystore-password:}")
private String sslKeystorePassword;
@Value("${queue.kafka.ssl.key-password:}")
private String sslKeyPassword;
@Value("${queue.kafka.acks:all}")
private String acks;
@Value("${queue.kafka.retries:1}")
private int retries;
@Value("${queue.kafka.compression-type:none}")
private String compressionType;
@Value("${queue.kafka.batch-size:16384}")
private int batchSize;
@Value("${queue.kafka.linger-ms:1}")
private long lingerMs;
@Value("${queue.kafka.max-request-size:1048576}")
private int maxRequestSize;
@Value("${queue.kafka.max-in-flight-requests-per-connection:5}")
private int maxInFlightRequestsPerConnection;
@Value("${queue.kafka.buffer-memory:33554432}")
private long bufferMemory;
@Value("${queue.kafka.replication-factor:1}")
@Getter
private short replicationFactor;
@Value("${queue.kafka.max-poll-records:8192}")
private int maxPollRecords;
@Value("${queue.kafka.max-poll-interval-ms:300000}")
private int maxPollIntervalMs;
@Value("${queue.kafka.max-partition-fetch-bytes:16777216}")
private int maxPartitionFetchBytes;
@Value("${queue.kafka.fetch-max-bytes:134217728}")
private int fetchMaxBytes;
@Value("${queue.kafka.request-timeout-ms:30000}")
private int requestTimeoutMs;
@Value("${queue.kafka.session-timeout-ms:10000}")
private int sessionTimeoutMs;
@Value("${queue.kafka.auto-offset-reset:earliest}")
private String autoOffsetReset;
@Value("${queue.kafka.other-inline:}")
private String otherInline;
@Setter
private Map<String, List<JCPPProperty>> consumerPropertiesPerTopic = Collections.emptyMap();
@Setter
private Map<String, List<JCPPProperty>> producerPropertiesPerTopic = Collections.emptyMap();
private volatile AdminClient adminClient;
public Properties toConsumerProps(String topic) {
Properties props = toProps();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
props.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, maxPartitionFetchBytes);
props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, fetchMaxBytes);
props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalMs);
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
consumerPropertiesPerTopic
.getOrDefault(topic, Collections.emptyList())
.forEach(kv -> props.put(kv.getKey(), kv.getValue()));
return props;
}
public Properties toProducerProps(String topic) {
Properties props = toProps();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
props.put(ProducerConfig.RETRIES_CONFIG, retries);
props.put(ProducerConfig.ACKS_CONFIG, acks);
props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
props.put(ProducerConfig.LINGER_MS_CONFIG, lingerMs);
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, compressionType);
props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, maxRequestSize);
props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, maxInFlightRequestsPerConnection);
producerPropertiesPerTopic
.getOrDefault(topic, Collections.emptyList())
.forEach(kv -> props.put(kv.getKey(), kv.getValue()));
return props;
}
    /**
     * Builds the properties shared by consumer, producer and admin clients:
     * request/session timeouts, the inline "other" properties, and SSL settings.
     * Note: {@link #configureSSL(Properties)} runs after the inline properties are
     * merged, so explicit SSL settings override any overlapping inline keys.
     */
    Properties toProps() {
        Properties props = new Properties();
        props.put(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG, requestTimeoutMs);
        props.put(CommonClientConfigs.SESSION_TIMEOUT_MS_CONFIG, sessionTimeoutMs);
        props.putAll(PropertyUtils.getProps(otherInline));
        configureSSL(props);
        return props;
    }
void configureSSL(Properties props) {
if (sslEnabled) {
props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
props.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, sslTruststoreLocation);
props.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, sslTruststorePassword);
props.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, sslKeystoreLocation);
props.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, sslKeystorePassword);
props.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, sslKeyPassword);
}
}
    /**
     * Returns the lazily created, shared {@link AdminClient}.
     * Uses double-checked locking on the volatile {@code adminClient} field so
     * the client is created at most once across threads.
     */
    public AdminClient getAdminClient() {
        if (adminClient == null) {
            synchronized (this) {
                // Re-check under the lock: another thread may have created it.
                if (adminClient == null) {
                    adminClient = AdminClient.create(toAdminProps());
                }
            }
        }
        return adminClient;
    }
protected Properties toAdminProps() {
Properties props = toProps();
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
props.put(AdminClientConfig.RETRIES_CONFIG, retries);
return props;
}
@PreDestroy
private void destroy() {
if (adminClient != null) {
adminClient.close();
}
}
}

View File

@@ -0,0 +1,32 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.kafka;
import jakarta.annotation.PostConstruct;
import lombok.Getter;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Component;
import sanbing.jcpp.infrastructure.util.property.PropertyUtils;
import java.util.Map;
@Component
@ConditionalOnProperty(prefix = "queue", value = "type", havingValue = "kafka")
public class KafkaTopicConfigs {
    // Well-known key used in topic config maps to carry the partition count.
    public static final String NUM_PARTITIONS_SETTING = "partitions";
    // Raw inline topic properties for the app topic; the exact string format is
    // whatever PropertyUtils.getProps accepts — presumably "key:value" pairs
    // (TODO confirm against PropertyUtils).
    @Value("${queue.kafka.topic-properties.app:}")
    private String appProperties;
    // Parsed form of appProperties, populated once at startup.
    @Getter
    private Map<String, String> appConfigs;
    // Parses the inline string exactly once after dependency injection.
    @PostConstruct
    private void init() {
        this.appConfigs = PropertyUtils.getProps(appProperties);
    }
}

View File

@@ -0,0 +1,66 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.memory;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;
import sanbing.jcpp.infrastructure.queue.QueueMsg;
import java.util.*;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
@Component
@Slf4j
public final class DefaultInMemoryStorage implements InMemoryStorage {

    // Maximum number of additional messages drained per get() call,
    // on top of the first polled message (so batches hold at most 1000).
    private static final int MAX_DRAIN = 999;

    // One unbounded queue per topic, created lazily on first put.
    private final ConcurrentHashMap<String, BlockingQueue<QueueMsg>> storage = new ConcurrentHashMap<>();

    /** Logs the size of every non-empty per-topic queue, at DEBUG level only. */
    @Override
    public void printStats() {
        if (!log.isDebugEnabled()) {
            return;
        }
        for (Map.Entry<String, BlockingQueue<QueueMsg>> entry : storage.entrySet()) {
            if (!entry.getValue().isEmpty()) {
                log.debug("[{}] Queue Size [{}]", entry.getKey(), entry.getValue().size());
            }
        }
    }

    /** @return total number of pending messages across all topics */
    @Override
    public int getLagTotal() {
        return storage.values().stream().mapToInt(BlockingQueue::size).sum();
    }

    /** @return number of pending messages for the given topic, or 0 if unknown */
    @Override
    public int getLag(String topic) {
        BlockingQueue<QueueMsg> queue = storage.get(topic);
        return queue == null ? 0 : queue.size();
    }

    /** Appends a message to the topic's queue, creating the queue on first use. */
    @Override
    public boolean put(String topic, QueueMsg msg) {
        return storage.computeIfAbsent(topic, unused -> new LinkedBlockingQueue<>()).add(msg);
    }

    /**
     * Drains up to {@code 1 + MAX_DRAIN} messages from the topic's queue.
     *
     * @return the drained batch, or an empty list when the topic is unknown or empty
     */
    @Override
    public List<QueueMsg> get(String topic) throws InterruptedException {
        BlockingQueue<QueueMsg> queue = storage.get(topic);
        if (queue == null) {
            return Collections.emptyList();
        }
        QueueMsg first = queue.poll();
        if (first == null) {
            return Collections.emptyList();
        }
        int remaining = queue.size();
        if (remaining == 0) {
            return Collections.singletonList(first);
        }
        List<QueueMsg> batch = new ArrayList<>(Math.min(remaining, MAX_DRAIN) + 1);
        batch.add(first);
        queue.drainTo(batch, MAX_DRAIN);
        return batch;
    }
}

View File

@@ -0,0 +1,106 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.memory;
import lombok.extern.slf4j.Slf4j;
import sanbing.jcpp.infrastructure.queue.QueueConsumer;
import sanbing.jcpp.infrastructure.queue.QueueMsg;
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
@Slf4j
public class InMemoryQueueConsumer<T extends QueueMsg> implements QueueConsumer<T> {

    private final InMemoryStorage storage;
    private final String topic;

    // Topic partitions currently subscribed to; replaced wholesale on subscribe().
    private volatile Set<TopicPartitionInfo> partitions;
    private volatile boolean stopped;
    private volatile boolean subscribed;

    /**
     * @param storage shared in-memory message storage backing this consumer
     * @param topic   default topic used by the no-arg {@link #subscribe()}
     */
    public InMemoryQueueConsumer(InMemoryStorage storage, String topic) {
        this.storage = storage;
        this.topic = topic;
        stopped = false;
    }

    @Override
    public String getTopic() {
        return topic;
    }

    /** Subscribes to the single default topic with no explicit partition. */
    @Override
    public void subscribe() {
        partitions = Collections.singleton(new TopicPartitionInfo(topic, null, true));
        subscribed = true;
    }

    @Override
    public void subscribe(Set<TopicPartitionInfo> partitions) {
        this.partitions = partitions;
        subscribed = true;
    }

    @Override
    public void stop() {
        stopped = true;
    }

    @Override
    public void unsubscribe() {
        stopped = true;
        subscribed = false;
    }

    /**
     * Drains pending messages from every subscribed partition's queue.
     * When nothing is available, sleeps for {@code durationInMillis} and
     * returns an empty list (emulating a blocking Kafka poll).
     *
     * @param durationInMillis time to sleep when no messages are available
     * @return drained messages, possibly empty; always empty when not subscribed
     */
    @Override
    public List<T> poll(long durationInMillis) {
        if (subscribed) {
            @SuppressWarnings("unchecked")
            List<T> messages = partitions
                    .stream()
                    .map(tpi -> {
                        try {
                            return storage.get(tpi.getFullTopicName());
                        } catch (InterruptedException e) {
                            // Restore the interrupt flag so callers can observe it.
                            Thread.currentThread().interrupt();
                            if (!stopped) {
                                log.error("Queue was interrupted.", e);
                            }
                            return Collections.emptyList();
                        }
                    })
                    .flatMap(List::stream)
                    .map(msg -> (T) msg).collect(Collectors.toList());
            if (!messages.isEmpty()) {
                return messages;
            }
            try {
                Thread.sleep(durationInMillis);
            } catch (InterruptedException e) {
                // Restore the interrupt flag so callers can observe it.
                Thread.currentThread().interrupt();
                if (!stopped) {
                    log.error("Failed to sleep.", e);
                }
            }
        }
        return Collections.emptyList();
    }

    /** No-op: the in-memory queue has no offsets to commit. */
    @Override
    public void commit() {
    }

    @Override
    public boolean isStopped() {
        return stopped;
    }

    @Override
    public List<String> getFullTopicNames() {
        return partitions.stream().map(TopicPartitionInfo::getFullTopicName).collect(Collectors.toList());
    }
}

View File

@@ -0,0 +1,48 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.memory;
import lombok.Data;
import sanbing.jcpp.infrastructure.queue.QueueCallback;
import sanbing.jcpp.infrastructure.queue.QueueMsg;
import sanbing.jcpp.infrastructure.queue.QueueProducer;
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
@Data
public class InMemoryQueueProducer<T extends QueueMsg> implements QueueProducer<T> {

    private final InMemoryStorage storage;
    private final String topic;

    /**
     * @param storage shared in-memory message storage backing this producer
     * @param topic   default topic of this producer (exposed via lombok getter)
     */
    public InMemoryQueueProducer(InMemoryStorage storage, String topic) {
        this.storage = storage;
        this.topic = topic;
    }

    /** No-op: the in-memory producer needs no initialization. */
    @Override
    public void init() {
    }

    /**
     * Puts the message into the target partition's queue and reports the
     * outcome through the optional callback.
     */
    @Override
    public void send(TopicPartitionInfo tpi, T msg, QueueCallback callback) {
        boolean added = storage.put(tpi.getFullTopicName(), msg);
        if (callback == null) {
            return;
        }
        if (added) {
            callback.onSuccess(null);
        } else {
            callback.onFailure(new RuntimeException("Failure add msg to InMemoryQueue"));
        }
    }

    /** No-op: nothing to release for the in-memory producer. */
    @Override
    public void stop() {
    }
}

View File

@@ -0,0 +1,24 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.memory;
import sanbing.jcpp.infrastructure.queue.QueueMsg;
import java.util.List;
/**
 * Per-topic in-memory message storage used by the in-memory queue implementation.
 */
public interface InMemoryStorage {
    /** Logs per-topic queue statistics (implementation-defined level/format). */
    void printStats();
    /** @return total number of pending messages across all topics */
    int getLagTotal();
    /** @return number of pending messages for the given topic */
    int getLag(String topic);
    /** Stores a message under the given topic; returns whether it was accepted. */
    boolean put(String topic, QueueMsg msg);
    /** Retrieves a batch of pending messages for the topic (possibly empty). */
    List<QueueMsg> get(String topic) throws InterruptedException;
}

View File

@@ -0,0 +1,22 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.processing;
import lombok.Getter;
import sanbing.jcpp.infrastructure.queue.ProtoQueueMsg;
import java.util.UUID;
/**
 * Immutable pairing of a message identifier with its proto queue message,
 * used while tracking in-flight messages during pack processing.
 *
 * @param <T> concrete protobuf message type carried by the queue message
 */
@Getter
public class IdMsgPair<T extends com.google.protobuf.GeneratedMessageV3> {
    final UUID uuid;
    final ProtoQueueMsg<T> msg;

    public IdMsgPair(UUID uuid, ProtoQueueMsg<T> msg) {
        this.uuid = uuid;
        this.msg = msg;
    }
}

View File

@@ -0,0 +1,18 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.provider;
import sanbing.jcpp.infrastructure.queue.ProtoQueueMsg;
import sanbing.jcpp.infrastructure.queue.QueueConsumer;
import sanbing.jcpp.infrastructure.queue.QueueProducer;
import sanbing.jcpp.proto.gen.ProtocolProto.UplinkQueueMessage;
/**
 * Factory for the app-side protocol uplink queue clients; implementations
 * exist per queue backend (in-memory, Kafka).
 */
public interface AppQueueFactory {
    /** Creates a consumer of protocol uplink messages for the configured app topic. */
    QueueConsumer<ProtoQueueMsg<UplinkQueueMessage>> createProtocolUplinkMsgConsumer();
    /** Creates a producer publishing protocol uplink messages to the given topic. */
    QueueProducer<ProtoQueueMsg<UplinkQueueMessage>> createProtocolUplinkMsgProducer(String topic);
}

View File

@@ -0,0 +1,48 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.provider;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import sanbing.jcpp.infrastructure.queue.ProtoQueueMsg;
import sanbing.jcpp.infrastructure.queue.QueueConsumer;
import sanbing.jcpp.infrastructure.queue.QueueProducer;
import sanbing.jcpp.infrastructure.queue.memory.InMemoryQueueConsumer;
import sanbing.jcpp.infrastructure.queue.memory.InMemoryQueueProducer;
import sanbing.jcpp.infrastructure.queue.memory.InMemoryStorage;
import sanbing.jcpp.infrastructure.queue.settings.QueueAppSettings;
import sanbing.jcpp.proto.gen.ProtocolProto.UplinkQueueMessage;
@Slf4j
@Component
@ConditionalOnExpression("'${queue.type:null}'=='memory' && '${service.type:null}'=='monolith'")
public class InMemoryAppQueueFactory implements AppQueueFactory {
private final InMemoryStorage storage;
private final QueueAppSettings appSettings;
public InMemoryAppQueueFactory(InMemoryStorage storage, QueueAppSettings appSettings) {
this.storage = storage;
this.appSettings = appSettings;
}
@Override
public QueueConsumer<ProtoQueueMsg<UplinkQueueMessage>> createProtocolUplinkMsgConsumer() {
return new InMemoryQueueConsumer<>(storage, appSettings.getTopic());
}
@Override
public QueueProducer<ProtoQueueMsg<UplinkQueueMessage>> createProtocolUplinkMsgProducer(String topic) {
return new InMemoryQueueProducer<>(storage, topic);
}
@Scheduled(fixedRateString = "${queue.in_memory.stats.print-interval-ms:60000}")
private void printInMemoryStats() {
storage.printStats();
}
}

View File

@@ -0,0 +1,83 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.provider;
import com.google.protobuf.util.JsonFormat;
import jakarta.annotation.PreDestroy;
import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression;
import org.springframework.stereotype.Component;
import sanbing.jcpp.infrastructure.queue.ProtoQueueMsg;
import sanbing.jcpp.infrastructure.queue.QueueAdmin;
import sanbing.jcpp.infrastructure.queue.QueueConsumer;
import sanbing.jcpp.infrastructure.queue.QueueProducer;
import sanbing.jcpp.infrastructure.queue.discovery.ServiceInfoProvider;
import sanbing.jcpp.infrastructure.queue.kafka.*;
import sanbing.jcpp.infrastructure.queue.settings.QueueAppSettings;
import sanbing.jcpp.proto.gen.ProtocolProto.UplinkQueueMessage;
/**
 * Kafka-backed {@link AppQueueFactory}: builds consumer/producer templates for
 * protocol uplink messages and shares a single topic admin for both.
 */
@Component
@ConditionalOnExpression("'${queue.type:null}'=='kafka'")
public class KafkaAppQueueFactory implements AppQueueFactory {
    private final KafkaSettings kafkaSettings;
    private final QueueAppSettings appSettings;
    private final KafkaConsumerStatsService consumerStatsService;
    private final ServiceInfoProvider serviceInfoProvider;
    // Admin used by both consumer and producer templates; created here with the
    // app topic configs, destroyed with this bean.
    private final QueueAdmin appAdmin;
    public KafkaAppQueueFactory(KafkaSettings kafkaSettings,
                                ServiceInfoProvider serviceInfoProvider,
                                QueueAppSettings appSettings,
                                KafkaConsumerStatsService consumerStatsService,
                                KafkaTopicConfigs kafkaTopicConfigs) {
        this.kafkaSettings = kafkaSettings;
        this.serviceInfoProvider = serviceInfoProvider;
        this.appSettings = appSettings;
        this.consumerStatsService = consumerStatsService;
        this.appAdmin = new KafkaAdmin(kafkaSettings, kafkaTopicConfigs.getAppConfigs());
    }
    /**
     * Builds a consumer for the app uplink topic. The payload decoder depends on
     * the configured decoder type: protobuf binary parsing, or JSON parsed via
     * {@link JsonFormat} into the same message type.
     */
    @Override
    public QueueConsumer<ProtoQueueMsg<UplinkQueueMessage>> createProtocolUplinkMsgConsumer() {
        KafkaConsumerTemplate.KafkaConsumerTemplateBuilder<ProtoQueueMsg<UplinkQueueMessage>> consumerBuilder = KafkaConsumerTemplate.builder();
        consumerBuilder.settings(kafkaSettings);
        consumerBuilder.topic(appSettings.getTopic());
        // Client id is unique per service instance; group id is shared so
        // instances split partitions of the uplink topic.
        consumerBuilder.clientId("protocol-uplink-consumer-" + serviceInfoProvider.getServiceId());
        consumerBuilder.groupId("protocol-uplink-consumer");
        if (appSettings.getDecoder() == QueueAppSettings.DecoderType.protobuf) {
            consumerBuilder.decoder(msg -> new ProtoQueueMsg<>(msg.getKey(), UplinkQueueMessage.parseFrom(msg.getData()), msg.getHeaders()));
        } else {
            consumerBuilder.decoder(msg -> {
                UplinkQueueMessage.Builder builder = UplinkQueueMessage.newBuilder();
                // NOTE(review): new String(...) uses the platform default charset here;
                // presumably payloads are UTF-8 — confirm against the producer side.
                JsonFormat.parser().merge(new String(msg.getData()), builder);
                return new ProtoQueueMsg<>(msg.getKey(), builder.build(), msg.getHeaders());
            });
        }
        consumerBuilder.admin(appAdmin);
        consumerBuilder.statsService(consumerStatsService);
        return consumerBuilder.build();
    }
    /** Builds a producer publishing uplink messages to the given topic. */
    @Override
    public QueueProducer<ProtoQueueMsg<UplinkQueueMessage>> createProtocolUplinkMsgProducer(String topic) {
        KafkaProducerTemplate.KafkaProducerTemplateBuilder<ProtoQueueMsg<UplinkQueueMessage>> requestBuilder = KafkaProducerTemplate.builder();
        requestBuilder.settings(kafkaSettings);
        requestBuilder.clientId("protocol-to-app-" + serviceInfoProvider.getServiceId());
        requestBuilder.topic(topic);
        requestBuilder.admin(appAdmin);
        return requestBuilder.build();
    }
    /** Releases the shared topic admin on bean shutdown. */
    @PreDestroy
    private void destroy() {
        if (appAdmin != null) {
            appAdmin.destroy();
        }
    }
}

View File

@@ -0,0 +1,30 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.queue.settings;
import lombok.Data;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Lazy;
import org.springframework.stereotype.Component;
/**
 * Settings of the app-side uplink queue: topic name, partition count and the
 * payload decoder type. Bound lazily from {@code queue.app.*} properties.
 */
@Lazy
@Data
@Component
public class QueueAppSettings {
    // Full name of the app uplink topic (required, no default).
    @Value("${queue.app.topic}")
    private String topic;
    // Number of partitions for the app uplink topic (required, no default).
    @Value("${queue.app.partitions}")
    private int partitions;
    // How uplink payloads are decoded; defaults to protobuf binary.
    @Value("${queue.app.decoder:protobuf}")
    private DecoderType decoder;
    /** Supported uplink payload encodings. */
    public enum DecoderType {
        protobuf,
        json
    }
}