mirror of
https://gitee.com/san-bing/JChargePointProtocol
synced 2026-05-09 20:39:55 +08:00
云快充1.5.0 初始化
This commit is contained in:
@@ -0,0 +1,109 @@
|
||||
/**
|
||||
* 抖音关注:程序员三丙
|
||||
* 知识星球:https://t.zsxq.com/j9b21
|
||||
*/
|
||||
package sanbing.jcpp.infrastructure.queue.discovery;
|
||||
|
||||
import jakarta.annotation.PostConstruct;
|
||||
import lombok.Getter;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.commons.lang3.RandomStringUtils;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.stereotype.Component;
|
||||
import org.springframework.util.StringUtils;
|
||||
import sanbing.jcpp.infrastructure.util.SystemUtil;
|
||||
import sanbing.jcpp.proto.gen.ClusterProto.ServiceInfo;
|
||||
import sanbing.jcpp.proto.gen.ClusterProto.SystemInfoProto;
|
||||
|
||||
import java.net.InetAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
|
||||
/**
|
||||
* @author baigod
|
||||
*/
|
||||
@Component
|
||||
@Slf4j
|
||||
public class DefaultServiceInfoProvider implements ServiceInfoProvider {
|
||||
|
||||
@Value("${service.id:#{null}}")
|
||||
@Getter
|
||||
private String serviceId;
|
||||
|
||||
@Getter
|
||||
@Value("${service.type:monolith}")
|
||||
private String serviceType;
|
||||
|
||||
private List<ServiceType> serviceTypes;
|
||||
|
||||
private ServiceInfo serviceInfo;
|
||||
|
||||
@Getter
|
||||
private String serviceWebapiEndpoint;
|
||||
|
||||
@Value("${server.port}")
|
||||
private String webapiPort;
|
||||
|
||||
@PostConstruct
|
||||
public void init() throws UnknownHostException {
|
||||
|
||||
|
||||
if (!StringUtils.hasText(this.serviceId)) {
|
||||
try {
|
||||
this.serviceId = InetAddress.getLocalHost().getHostName();
|
||||
} catch (UnknownHostException e) {
|
||||
this.serviceId = RandomStringUtils.randomAlphabetic(10);
|
||||
}
|
||||
}
|
||||
log.info("Current Service ID: {}", this.serviceId);
|
||||
|
||||
serviceWebapiEndpoint = InetAddress.getLocalHost().getHostAddress() + ":" + webapiPort;
|
||||
log.info("Current Service HostAddress: {}", this.serviceWebapiEndpoint);
|
||||
if (serviceType.equalsIgnoreCase("monolith")) {
|
||||
serviceTypes = List.of(ServiceType.values());
|
||||
} else {
|
||||
serviceTypes = Collections.singletonList(ServiceType.of(serviceType));
|
||||
}
|
||||
|
||||
generateNewServiceInfoWithCurrentSystemInfo();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public boolean isMonolith() {
|
||||
return "monolith".equals(getServiceType());
|
||||
}
|
||||
|
||||
@Override
|
||||
public ServiceInfo getServiceInfo() {
|
||||
return serviceInfo;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ServiceInfo generateNewServiceInfoWithCurrentSystemInfo() {
|
||||
ServiceInfo.Builder builder = ServiceInfo.newBuilder()
|
||||
.setServiceId(serviceId)
|
||||
.addAllServiceTypes(serviceTypes.stream().map(ServiceType::name).collect(Collectors.toList()))
|
||||
.setSystemInfo(getCurrentSystemInfoProto());
|
||||
return serviceInfo = builder.build();
|
||||
}
|
||||
|
||||
private SystemInfoProto getCurrentSystemInfoProto() {
|
||||
SystemInfoProto.Builder builder = SystemInfoProto.newBuilder();
|
||||
|
||||
SystemUtil.getCpuUsage().ifPresent(builder::setCpuUsage);
|
||||
SystemUtil.getMemoryUsage().ifPresent(builder::setMemoryUsage);
|
||||
SystemUtil.getDiscSpaceUsage().ifPresent(builder::setDiskUsage);
|
||||
|
||||
SystemUtil.getCpuCount().ifPresent(builder::setCpuCount);
|
||||
SystemUtil.getTotalMemory().ifPresent(builder::setTotalMemory);
|
||||
SystemUtil.getTotalDiscSpace().ifPresent(builder::setTotalDiscSpace);
|
||||
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -0,0 +1,15 @@
|
||||
/**
|
||||
* 抖音关注:程序员三丙
|
||||
* 知识星球:https://t.zsxq.com/j9b21
|
||||
*/
|
||||
package sanbing.jcpp.infrastructure.queue.discovery;
|
||||
|
||||
import sanbing.jcpp.proto.gen.ClusterProto.ServiceInfo;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
 * Provides visibility of the other service instances participating in the cluster.
 */
public interface DiscoveryProvider {

    /**
     * Returns the {@link ServiceInfo} of every other known server.
     * Implementations return an empty list (never {@code null}) when the node
     * runs standalone.
     */
    List<ServiceInfo> getOtherServers();

}
|
||||
@@ -0,0 +1,41 @@
|
||||
/**
|
||||
* 抖音关注:程序员三丙
|
||||
* 知识星球:https://t.zsxq.com/j9b21
|
||||
*/
|
||||
package sanbing.jcpp.infrastructure.queue.discovery;
|
||||
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
|
||||
import org.springframework.boot.context.event.ApplicationReadyEvent;
|
||||
import org.springframework.stereotype.Service;
|
||||
import sanbing.jcpp.infrastructure.util.annotation.AfterStartUp;
|
||||
import sanbing.jcpp.proto.gen.ClusterProto.ServiceInfo;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
@Service
|
||||
@ConditionalOnProperty(prefix = "zk", value = "enabled", havingValue = "false", matchIfMissing = true)
|
||||
@Slf4j
|
||||
public class DummyDiscoveryProvider implements DiscoveryProvider {
|
||||
|
||||
private final ServiceInfoProvider serviceInfoProvider;
|
||||
private final PartitionProvider partitionProvider;
|
||||
|
||||
public DummyDiscoveryProvider(ServiceInfoProvider serviceInfoProvider, PartitionProvider partitionProvider) {
|
||||
this.serviceInfoProvider = serviceInfoProvider;
|
||||
this.partitionProvider = partitionProvider;
|
||||
}
|
||||
|
||||
|
||||
@AfterStartUp(order = AfterStartUp.DISCOVERY_SERVICE)
|
||||
public void onApplicationEvent(ApplicationReadyEvent event) {
|
||||
partitionProvider.recalculatePartitions(serviceInfoProvider.getServiceInfo(), Collections.emptyList());
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<ServiceInfo> getOtherServers() {
|
||||
return Collections.emptyList();
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,199 @@
|
||||
/**
|
||||
* 抖音关注:程序员三丙
|
||||
* 知识星球:https://t.zsxq.com/j9b21
|
||||
*/
|
||||
package sanbing.jcpp.infrastructure.queue.discovery;
|
||||
|
||||
import com.google.common.hash.HashFunction;
|
||||
import jakarta.annotation.PostConstruct;
|
||||
import jakarta.annotation.Resource;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.context.ApplicationEventPublisher;
|
||||
import org.springframework.stereotype.Component;
|
||||
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
|
||||
import sanbing.jcpp.infrastructure.queue.discovery.event.PartitionChangeEvent;
|
||||
import sanbing.jcpp.proto.gen.ClusterProto.ServiceInfo;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static sanbing.jcpp.infrastructure.util.JCPPHashUtil.forName;
|
||||
import static sanbing.jcpp.infrastructure.util.JCPPHashUtil.hash;
|
||||
|
||||
/**
|
||||
* @author baigod
|
||||
*/
|
||||
@Component
|
||||
@Slf4j
|
||||
@ConfigurationProperties("queue.partitions")
|
||||
public class HashPartitionProvider implements PartitionProvider {
|
||||
|
||||
@Value("${queue.app.topic}")
|
||||
private String appTopic;
|
||||
@Value("${queue.app.partitions:10}")
|
||||
private Integer appPartitions;
|
||||
@Value("${queue.partitions.hash_function_name:murmur3_128}")
|
||||
private String hashFunctionName;
|
||||
|
||||
private final ConcurrentMap<QueueKey, String> partitionTopicsMap = new ConcurrentHashMap<>();
|
||||
private final ConcurrentMap<QueueKey, Integer> partitionSizesMap = new ConcurrentHashMap<>();
|
||||
|
||||
private HashFunction hashFunction;
|
||||
|
||||
protected volatile ConcurrentMap<QueueKey, List<Integer>> myPartitions = new ConcurrentHashMap<>();
|
||||
|
||||
@Resource
|
||||
private ApplicationEventPublisher applicationEventPublisher;
|
||||
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
this.hashFunction = forName(hashFunctionName);
|
||||
|
||||
QueueKey appKey = new QueueKey(ServiceType.APP);
|
||||
partitionTopicsMap.put(appKey, appTopic);
|
||||
partitionSizesMap.put(appKey, appPartitions);
|
||||
}
|
||||
|
||||
private TopicPartitionInfo resolve(QueueKey queueKey, int hash) {
|
||||
Integer partitionSize = partitionSizesMap.get(queueKey);
|
||||
if (partitionSize == null) {
|
||||
throw new IllegalStateException("Partitions info for queue " + queueKey + " is missing");
|
||||
}
|
||||
|
||||
int partition = Math.abs(hash % partitionSize);
|
||||
|
||||
return buildTopicPartitionInfo(queueKey, partition);
|
||||
}
|
||||
|
||||
@Override
|
||||
public TopicPartitionInfo resolve(ServiceType serviceType, String queueName, UUID entityId) {
|
||||
QueueKey queueKey = getQueueKey(serviceType, queueName);
|
||||
return resolve(queueKey, hash(hashFunction, entityId));
|
||||
}
|
||||
|
||||
@Override
|
||||
public TopicPartitionInfo resolve(ServiceType serviceType, String queueName, String pileCode) {
|
||||
QueueKey queueKey = getQueueKey(serviceType, queueName);
|
||||
return resolve(queueKey, hash(hashFunction, pileCode));
|
||||
}
|
||||
|
||||
private QueueKey getQueueKey(ServiceType serviceType, String queueName) {
|
||||
QueueKey queueKey = new QueueKey(serviceType, queueName);
|
||||
if (!partitionSizesMap.containsKey(queueKey)) {
|
||||
queueKey = new QueueKey(serviceType);
|
||||
}
|
||||
return queueKey;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void recalculatePartitions(ServiceInfo currentService, List<ServiceInfo> otherServices) {
|
||||
log.info("Recalculating partitions");
|
||||
logServiceInfo(currentService);
|
||||
otherServices.forEach(this::logServiceInfo);
|
||||
|
||||
Map<QueueKey, List<ServiceInfo>> queueServicesMap = new HashMap<>();
|
||||
addNode(currentService, queueServicesMap);
|
||||
for (ServiceInfo other : otherServices) {
|
||||
addNode(other, queueServicesMap);
|
||||
}
|
||||
queueServicesMap.values().forEach(list -> list.sort(Comparator.comparing(ServiceInfo::getServiceId)));
|
||||
|
||||
final ConcurrentMap<QueueKey, List<Integer>> newPartitions = new ConcurrentHashMap<>();
|
||||
partitionSizesMap.forEach((queueKey, size) -> {
|
||||
for (int i = 0; i < size; i++) {
|
||||
try {
|
||||
List<ServiceInfo> servers = queueServicesMap.get(queueKey);
|
||||
ServiceInfo serviceInfo = servers == null || servers.isEmpty() ? null : servers.get(i % servers.size());
|
||||
log.info("Server responsible for {}[{}] - {}", queueKey, i, serviceInfo != null ? serviceInfo.getServiceId() : "none");
|
||||
if (currentService.equals(serviceInfo)) {
|
||||
newPartitions.computeIfAbsent(queueKey, key -> new ArrayList<>()).add(i);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to resolve server responsible for {}[{}]", queueKey, i, e);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
final ConcurrentMap<QueueKey, List<Integer>> oldPartitions = myPartitions;
|
||||
myPartitions = newPartitions;
|
||||
|
||||
log.info("Current Server responsible partitions: {}", myPartitions);
|
||||
|
||||
Map<QueueKey, Set<TopicPartitionInfo>> changedPartitionsMap = new HashMap<>();
|
||||
|
||||
Set<QueueKey> removed = new HashSet<>();
|
||||
oldPartitions.forEach((queueKey, partitions) -> {
|
||||
if (!newPartitions.containsKey(queueKey)) {
|
||||
removed.add(queueKey);
|
||||
}
|
||||
});
|
||||
|
||||
removed.forEach(queueKey -> {
|
||||
changedPartitionsMap.put(queueKey, Collections.emptySet());
|
||||
});
|
||||
|
||||
myPartitions.forEach((queueKey, partitions) -> {
|
||||
if (!partitions.equals(oldPartitions.get(queueKey))) {
|
||||
Set<TopicPartitionInfo> tpiList = partitions.stream()
|
||||
.map(partition -> buildTopicPartitionInfo(queueKey, partition))
|
||||
.collect(Collectors.toSet());
|
||||
changedPartitionsMap.put(queueKey, tpiList);
|
||||
}
|
||||
});
|
||||
|
||||
if (!changedPartitionsMap.isEmpty()) {
|
||||
Map<ServiceType, Map<QueueKey, Set<TopicPartitionInfo>>> partitionsByServiceType = new HashMap<>();
|
||||
changedPartitionsMap.forEach((queueKey, partitions) -> {
|
||||
partitionsByServiceType.computeIfAbsent(queueKey.getType(), serviceType -> new HashMap<>())
|
||||
.put(queueKey, partitions);
|
||||
});
|
||||
partitionsByServiceType.forEach(this::publishPartitionChangeEvent);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private void publishPartitionChangeEvent(ServiceType serviceType, Map<QueueKey, Set<TopicPartitionInfo>> partitionsMap) {
|
||||
log.info("Partitions changed: {}", System.lineSeparator() + partitionsMap.entrySet().stream()
|
||||
.map(entry -> "[" + entry.getKey() + "] - [" + entry.getValue().stream()
|
||||
.map(tpi -> tpi.getPartition().orElse(-1).toString()).sorted()
|
||||
.collect(Collectors.joining(", ")) + "]")
|
||||
.collect(Collectors.joining(System.lineSeparator())));
|
||||
PartitionChangeEvent event = new PartitionChangeEvent(this, serviceType, partitionsMap);
|
||||
try {
|
||||
applicationEventPublisher.publishEvent(event);
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to publish partition change event {}", event, e);
|
||||
}
|
||||
}
|
||||
|
||||
private void logServiceInfo(ServiceInfo server) {
|
||||
log.info("Found server: {}", server.getServiceId());
|
||||
}
|
||||
|
||||
private void addNode(ServiceInfo instance, Map<QueueKey, List<ServiceInfo>> queueServiceList) {
|
||||
for (String serviceTypeStr : instance.getServiceTypesList()) {
|
||||
ServiceType serviceType = ServiceType.of(serviceTypeStr);
|
||||
if (ServiceType.APP.equals(serviceType)) {
|
||||
queueServiceList.computeIfAbsent(new QueueKey(serviceType), key -> new ArrayList<>()).add(instance);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private TopicPartitionInfo buildTopicPartitionInfo(QueueKey queueKey, int partition) {
|
||||
List<Integer> partitions = myPartitions.get(queueKey);
|
||||
return buildTopicPartitionInfo(queueKey, partition, partitions != null && partitions.contains(partition));
|
||||
}
|
||||
|
||||
private TopicPartitionInfo buildTopicPartitionInfo(QueueKey queueKey, int partition, boolean myPartition) {
|
||||
return TopicPartitionInfo.builder()
|
||||
.topic(partitionTopicsMap.get(queueKey))
|
||||
.partition(partition)
|
||||
.myPartition(myPartition)
|
||||
.build();
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,22 @@
|
||||
/**
|
||||
* 抖音关注:程序员三丙
|
||||
* 知识星球:https://t.zsxq.com/j9b21
|
||||
*/
|
||||
package sanbing.jcpp.infrastructure.queue.discovery;
|
||||
|
||||
|
||||
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
|
||||
import sanbing.jcpp.proto.gen.ClusterProto.ServiceInfo;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
|
||||
public interface PartitionProvider {
|
||||
|
||||
TopicPartitionInfo resolve(ServiceType serviceType,String queueName, UUID entityId);
|
||||
|
||||
TopicPartitionInfo resolve(ServiceType serviceType,String queueName, String pileCode);
|
||||
|
||||
void recalculatePartitions(ServiceInfo currentService, List<ServiceInfo> otherServices);
|
||||
|
||||
}
|
||||
@@ -0,0 +1,33 @@
|
||||
/**
|
||||
* 抖音关注:程序员三丙
|
||||
* 知识星球:https://t.zsxq.com/j9b21
|
||||
*/
|
||||
package sanbing.jcpp.infrastructure.queue.discovery;
|
||||
|
||||
import lombok.Data;
|
||||
import lombok.With;
|
||||
|
||||
@Data
|
||||
public class QueueKey {
|
||||
public static final String MAIN_QUEUE_NAME = "Main";
|
||||
|
||||
private final ServiceType type;
|
||||
@With
|
||||
private final String queueName;
|
||||
|
||||
public QueueKey(ServiceType type, String queueName) {
|
||||
this.type = type;
|
||||
this.queueName = queueName;
|
||||
}
|
||||
|
||||
public QueueKey(ServiceType type) {
|
||||
this.type = type;
|
||||
this.queueName = MAIN_QUEUE_NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "QK(" + queueName + "," + type + ")";
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,26 @@
|
||||
/**
|
||||
* 抖音关注:程序员三丙
|
||||
* 知识星球:https://t.zsxq.com/j9b21
|
||||
*/
|
||||
package sanbing.jcpp.infrastructure.queue.discovery;
|
||||
|
||||
|
||||
import sanbing.jcpp.proto.gen.ClusterProto;
|
||||
|
||||
/**
 * Exposes the identity and current state of this service instance.
 *
 * @author baigod
 */
public interface ServiceInfoProvider {
    /** Unique id of this service instance. */
    String getServiceId();

    /** host:port endpoint of this instance's web API. */
    String getServiceWebapiEndpoint();

    /** Configured service type label, e.g. "monolith". */
    String getServiceType();

    /** Whether this instance runs as a monolith serving every service type. */
    boolean isMonolith();

    /** Cached ServiceInfo snapshot of this instance. */
    ClusterProto.ServiceInfo getServiceInfo();

    /** Rebuilds the cached ServiceInfo with fresh system metrics and returns it. */
    ClusterProto.ServiceInfo generateNewServiceInfoWithCurrentSystemInfo();

}
|
||||
@@ -0,0 +1,23 @@
|
||||
/**
|
||||
* 抖音关注:程序员三丙
|
||||
* 知识星球:https://t.zsxq.com/j9b21
|
||||
*/
|
||||
package sanbing.jcpp.infrastructure.queue.discovery;
|
||||
|
||||
import lombok.Getter;
import lombok.RequiredArgsConstructor;

import java.util.Locale;
|
||||
|
||||
/**
 * Types of services participating in the cluster. Each constant carries a
 * lower-case label used in configuration (see {@link #of(String)}).
 * Constructor and getter are written out explicitly (equivalent to the previous
 * Lombok-generated ones) so the enum has no compile-time Lombok dependency.
 */
public enum ServiceType {

    APP("app"),
    PROTOCOL("protocol");

    // Lower-case configuration label, e.g. "app".
    private final String label;

    ServiceType(String label) {
        this.label = label;
    }

    public String getLabel() {
        return label;
    }

    /**
     * Resolves a ServiceType from a case-insensitive name.
     * Uses {@link Locale#ROOT} so the mapping is locale-independent; a bare
     * toUpperCase() applies the default locale's case rules and can mis-map
     * letters under locales such as Turkish.
     *
     * @param serviceType case-insensitive enum name, e.g. "app" or "APP"
     * @return the matching ServiceType
     * @throws IllegalArgumentException if no constant matches
     */
    public static ServiceType of(String serviceType) {
        return ServiceType.valueOf(serviceType.toUpperCase(Locale.ROOT));
    }

}
|
||||
@@ -0,0 +1,328 @@
|
||||
/**
|
||||
* 抖音关注:程序员三丙
|
||||
* 知识星球:https://t.zsxq.com/j9b21
|
||||
*/
|
||||
package sanbing.jcpp.infrastructure.queue.discovery;
|
||||
|
||||
import com.google.protobuf.InvalidProtocolBufferException;
|
||||
import jakarta.annotation.PostConstruct;
|
||||
import jakarta.annotation.PreDestroy;
|
||||
import lombok.SneakyThrows;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.curator.framework.CuratorFramework;
|
||||
import org.apache.curator.framework.CuratorFrameworkFactory;
|
||||
import org.apache.curator.framework.imps.CuratorFrameworkState;
|
||||
import org.apache.curator.framework.recipes.cache.ChildData;
|
||||
import org.apache.curator.framework.recipes.cache.CuratorCache;
|
||||
import org.apache.curator.framework.recipes.cache.CuratorCacheListener;
|
||||
import org.apache.curator.framework.state.ConnectionState;
|
||||
import org.apache.curator.framework.state.ConnectionStateListener;
|
||||
import org.apache.curator.retry.RetryForever;
|
||||
import org.apache.curator.utils.CloseableUtils;
|
||||
import org.apache.zookeeper.CreateMode;
|
||||
import org.apache.zookeeper.KeeperException;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
|
||||
import org.springframework.boot.context.event.ApplicationReadyEvent;
|
||||
import org.springframework.context.ApplicationEventPublisher;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.util.Assert;
|
||||
import sanbing.jcpp.infrastructure.queue.discovery.event.OtherServiceShutdownEvent;
|
||||
import sanbing.jcpp.infrastructure.util.annotation.AfterStartUp;
|
||||
import sanbing.jcpp.infrastructure.util.async.JCPPThreadFactory;
|
||||
import sanbing.jcpp.proto.gen.ClusterProto.ServiceInfo;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.NoSuchElementException;
|
||||
import java.util.concurrent.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
@Service
|
||||
@ConditionalOnProperty(prefix = "zk", value = "enabled", havingValue = "true")
|
||||
@Slf4j
|
||||
public class ZkDiscoveryProvider implements DiscoveryProvider, CuratorCacheListener {
|
||||
|
||||
@Value("${zk.url}")
|
||||
private String zkUrl;
|
||||
@Value("${zk.retry-interval-ms}")
|
||||
private Integer zkRetryInterval;
|
||||
@Value("${zk.connection-timeout-ms}")
|
||||
private Integer zkConnectionTimeout;
|
||||
@Value("${zk.session-timeout-ms}")
|
||||
private Integer zkSessionTimeout;
|
||||
@Value("${zk.zk-dir}")
|
||||
private String zkDir;
|
||||
@Value("${zk.recalculate-delay:0}")
|
||||
private Long recalculateDelay;
|
||||
|
||||
protected final ConcurrentHashMap<String, ScheduledFuture<?>> delayedTasks;
|
||||
|
||||
private final ApplicationEventPublisher applicationEventPublisher;
|
||||
private final ServiceInfoProvider serviceInfoProvider;
|
||||
private final PartitionProvider partitionProvider;
|
||||
|
||||
private ScheduledExecutorService zkExecutorService;
|
||||
private CuratorFramework client;
|
||||
private CuratorCache cache;
|
||||
private String nodePath;
|
||||
private String zkNodesDir;
|
||||
|
||||
private volatile boolean stopped = true;
|
||||
|
||||
public ZkDiscoveryProvider(ApplicationEventPublisher applicationEventPublisher,
|
||||
ServiceInfoProvider serviceInfoProvider,
|
||||
PartitionProvider partitionProvider) {
|
||||
this.applicationEventPublisher = applicationEventPublisher;
|
||||
this.serviceInfoProvider = serviceInfoProvider;
|
||||
this.partitionProvider = partitionProvider;
|
||||
delayedTasks = new ConcurrentHashMap<>();
|
||||
}
|
||||
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
log.info("Discovery Provider Initializing...");
|
||||
Assert.hasLength(zkUrl, missingProperty("zk.url"));
|
||||
Assert.notNull(zkRetryInterval, missingProperty("zk.retry-interval-ms"));
|
||||
Assert.notNull(zkConnectionTimeout, missingProperty("zk.connection-timeout-ms"));
|
||||
Assert.notNull(zkSessionTimeout, missingProperty("zk-session-timeout-ms"));
|
||||
|
||||
zkExecutorService = Executors.newSingleThreadScheduledExecutor(JCPPThreadFactory.forName("zk-discovery"));
|
||||
|
||||
zkNodesDir = zkDir + "/nodes";
|
||||
initZkClient();
|
||||
|
||||
log.info("Initialization completed, using ZK connect string: {}", zkUrl);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<ServiceInfo> getOtherServers() {
|
||||
return cache.stream()
|
||||
.filter(cd -> !cd.getPath().equals(nodePath) && !cd.getPath().equals(zkNodesDir))
|
||||
.map(cd -> {
|
||||
try {
|
||||
return ServiceInfo.parseFrom(cd.getData());
|
||||
} catch (NoSuchElementException | InvalidProtocolBufferException e) {
|
||||
log.error("Failed to decode ZK node", e);
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
})
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
|
||||
@AfterStartUp(order = AfterStartUp.DISCOVERY_SERVICE)
|
||||
public void onApplicationEvent(ApplicationReadyEvent event) {
|
||||
if (stopped) {
|
||||
log.debug("Ignoring application ready event. Service is stopped.");
|
||||
return;
|
||||
} else {
|
||||
log.info("Received application ready event. Starting current ZK node.");
|
||||
}
|
||||
if (client.getState() != CuratorFrameworkState.STARTED) {
|
||||
log.debug("Ignoring application ready event, ZK client is not started, ZK client state [{}]", client.getState());
|
||||
return;
|
||||
}
|
||||
log.info("Going to publish current server...");
|
||||
publishCurrentServer();
|
||||
log.info("Going to recalculate partitions...");
|
||||
recalculatePartitions();
|
||||
|
||||
zkExecutorService.scheduleAtFixedRate(this::publishCurrentServer, 1, 1, TimeUnit.MINUTES);
|
||||
}
|
||||
|
||||
@SneakyThrows
|
||||
public synchronized void publishCurrentServer() {
|
||||
ServiceInfo self = serviceInfoProvider.getServiceInfo();
|
||||
if (currentServerExists()) {
|
||||
log.trace("[{}] Updating ZK node for current instance: {}", self.getServiceId(), nodePath);
|
||||
client.setData().forPath(nodePath, serviceInfoProvider.generateNewServiceInfoWithCurrentSystemInfo().toByteArray());
|
||||
} else {
|
||||
try {
|
||||
log.info("[{}] Creating ZK node for current instance", self.getServiceId());
|
||||
nodePath = client.create()
|
||||
.creatingParentsIfNeeded()
|
||||
.withMode(CreateMode.EPHEMERAL_SEQUENTIAL)
|
||||
.forPath(zkNodesDir + "/node-", self.toByteArray());
|
||||
log.info("[{}] Created ZK node for current instance: {}", self.getServiceId(), nodePath);
|
||||
client.getConnectionStateListenable().addListener(checkReconnect(self));
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to create ZK node", e);
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private boolean currentServerExists() {
|
||||
if (nodePath == null) {
|
||||
return false;
|
||||
}
|
||||
try {
|
||||
ServiceInfo self = serviceInfoProvider.getServiceInfo();
|
||||
ServiceInfo registeredServerInfo = ServiceInfo.parseFrom(client.getData().forPath(nodePath));
|
||||
if (self.equals(registeredServerInfo)) {
|
||||
return true;
|
||||
}
|
||||
} catch (KeeperException.NoNodeException e) {
|
||||
log.info("ZK node does not exist: {}", nodePath);
|
||||
} catch (Exception e) {
|
||||
log.error("Couldn't check if ZK node exists", e);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private ConnectionStateListener checkReconnect(ServiceInfo self) {
|
||||
return (client, newState) -> {
|
||||
log.info("[{}] ZK state changed: {}", self.getServiceId(), newState);
|
||||
if (newState == ConnectionState.LOST) {
|
||||
zkExecutorService.submit(this::reconnect);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
private volatile boolean reconnectInProgress = false;
|
||||
|
||||
private synchronized void reconnect() {
|
||||
if (!reconnectInProgress) {
|
||||
reconnectInProgress = true;
|
||||
try {
|
||||
destroyZkClient();
|
||||
initZkClient();
|
||||
publishCurrentServer();
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to reconnect to ZK: {}", e.getMessage(), e);
|
||||
} finally {
|
||||
reconnectInProgress = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void initZkClient() {
|
||||
try {
|
||||
client = CuratorFrameworkFactory.newClient(zkUrl, zkSessionTimeout, zkConnectionTimeout, new RetryForever(zkRetryInterval));
|
||||
client.start();
|
||||
client.blockUntilConnected();
|
||||
cache = CuratorCache.builder(client, zkNodesDir).build();
|
||||
cache.listenable().addListener(this);
|
||||
cache.start();
|
||||
stopped = false;
|
||||
log.info("ZK client connected");
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to connect to ZK: {}", e.getMessage(), e);
|
||||
CloseableUtils.closeQuietly(cache);
|
||||
CloseableUtils.closeQuietly(client);
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
private void unpublishCurrentServer() {
|
||||
try {
|
||||
if (nodePath != null) {
|
||||
client.delete().forPath(nodePath);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to delete ZK node {}", nodePath, e);
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
private void destroyZkClient() {
|
||||
stopped = true;
|
||||
try {
|
||||
unpublishCurrentServer();
|
||||
} catch (Exception ignored) {
|
||||
}
|
||||
CloseableUtils.closeQuietly(cache);
|
||||
CloseableUtils.closeQuietly(client);
|
||||
log.info("ZK client disconnected");
|
||||
}
|
||||
|
||||
@PreDestroy
|
||||
public void destroy() {
|
||||
destroyZkClient();
|
||||
zkExecutorService.shutdownNow();
|
||||
log.info("Stopped zk discovery service");
|
||||
}
|
||||
|
||||
public static String missingProperty(String propertyName) {
|
||||
return "The " + propertyName + " property need to be set!";
|
||||
}
|
||||
|
||||
@Override
|
||||
public void event(Type type, ChildData oldData, ChildData data) {
|
||||
if (stopped) {
|
||||
log.info("Ignoring {}. Service is stopped.", type);
|
||||
return;
|
||||
}
|
||||
if (client.getState() != CuratorFrameworkState.STARTED) {
|
||||
log.info("Ignoring {}, ZK client is not started, ZK client state [{}]", type, client.getState());
|
||||
return;
|
||||
}
|
||||
|
||||
switch (type) {
|
||||
case NODE_CREATED -> {
|
||||
if (data == null || data.getData() == null) {
|
||||
log.info("Ignoring {} due to empty created data", type);
|
||||
return;
|
||||
}
|
||||
String serviceId = getServiceId(type, data);
|
||||
|
||||
ScheduledFuture<?> task = delayedTasks.remove(serviceId);
|
||||
if (task != null) {
|
||||
if (task.cancel(false)) {
|
||||
log.info("[{}] Recalculate partitions ignored. Service was restarted in time.", serviceId);
|
||||
} else {
|
||||
log.info("[{}] Going to recalculate partitions. Service was not restarted in time!", serviceId);
|
||||
recalculatePartitions();
|
||||
}
|
||||
} else {
|
||||
log.info("[{}] Going to recalculate partitions due to adding new node.",
|
||||
serviceId);
|
||||
recalculatePartitions();
|
||||
}
|
||||
}
|
||||
case NODE_DELETED -> {
|
||||
if (oldData == null || oldData.getData() == null) {
|
||||
log.info("Ignoring {} due to empty delete data", type);
|
||||
return;
|
||||
} else if (nodePath != null && nodePath.equals(oldData.getPath())) {
|
||||
log.info("ZK node for current instance is somehow deleted.");
|
||||
publishCurrentServer();
|
||||
return;
|
||||
}
|
||||
String serviceId = getServiceId(type, oldData);
|
||||
|
||||
zkExecutorService.submit(() -> applicationEventPublisher.publishEvent(new OtherServiceShutdownEvent(this, serviceId)));
|
||||
ScheduledFuture<?> future = zkExecutorService.schedule(() -> {
|
||||
log.info("[{}] Going to recalculate partitions due to removed node", serviceId);
|
||||
ScheduledFuture<?> removedTask = delayedTasks.remove(serviceId);
|
||||
if (removedTask != null) {
|
||||
recalculatePartitions();
|
||||
}
|
||||
}, recalculateDelay, TimeUnit.MILLISECONDS);
|
||||
delayedTasks.put(serviceId, future);
|
||||
}
|
||||
default -> {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static String getServiceId(Type type, ChildData data) {
|
||||
ServiceInfo instance;
|
||||
try {
|
||||
instance = ServiceInfo.parseFrom(data.getData());
|
||||
} catch (InvalidProtocolBufferException e) {
|
||||
log.error("Failed to decode server instance for node {}", data.getPath(), e);
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
|
||||
String serviceId = instance.getServiceId();
|
||||
|
||||
log.info("Processing [{}] event for [{}]", type, serviceId);
|
||||
return serviceId;
|
||||
}
|
||||
|
||||
synchronized void recalculatePartitions() {
|
||||
delayedTasks.values().forEach(future -> future.cancel(false));
|
||||
delayedTasks.clear();
|
||||
partitionProvider.recalculatePartitions(serviceInfoProvider.getServiceInfo(), getOtherServers());
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,26 @@
|
||||
/**
|
||||
* 抖音关注:程序员三丙
|
||||
* 知识星球:https://t.zsxq.com/j9b21
|
||||
*/
|
||||
package sanbing.jcpp.infrastructure.queue.discovery.event;
|
||||
|
||||
import lombok.Getter;
|
||||
import lombok.ToString;
|
||||
import org.springframework.context.ApplicationEvent;
|
||||
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
@ToString
|
||||
public class JCPPApplicationEvent extends ApplicationEvent {
|
||||
|
||||
private static final AtomicInteger sequence = new AtomicInteger();
|
||||
|
||||
@Getter
|
||||
private final int sequenceNumber;
|
||||
|
||||
public JCPPApplicationEvent(Object source) {
|
||||
super(source);
|
||||
sequenceNumber = sequence.incrementAndGet();
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,54 @@
|
||||
/**
|
||||
* 抖音关注:程序员三丙
|
||||
* 知识星球:https://t.zsxq.com/j9b21
|
||||
*/
|
||||
package sanbing.jcpp.infrastructure.queue.discovery.event;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.context.ApplicationListener;
|
||||
|
||||
import java.util.concurrent.locks.Lock;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
|
||||
public abstract class JCPPApplicationEventListener<T extends JCPPApplicationEvent> implements ApplicationListener<T> {
|
||||
|
||||
private int lastProcessedSequenceNumber = Integer.MIN_VALUE;
|
||||
private final Lock seqNumberLock = new ReentrantLock();
|
||||
|
||||
private final Logger log = LoggerFactory.getLogger(getClass());
|
||||
|
||||
@Override
|
||||
public void onApplicationEvent(T event) {
|
||||
if (!filterApplicationEvent(event)) {
|
||||
log.trace("Skipping event due to filter: {}", event);
|
||||
return;
|
||||
}
|
||||
boolean validUpdate = false;
|
||||
seqNumberLock.lock();
|
||||
try {
|
||||
if (event.getSequenceNumber() > lastProcessedSequenceNumber) {
|
||||
validUpdate = true;
|
||||
lastProcessedSequenceNumber = event.getSequenceNumber();
|
||||
}
|
||||
} finally {
|
||||
seqNumberLock.unlock();
|
||||
}
|
||||
if (validUpdate) {
|
||||
try {
|
||||
onJCPPApplicationEvent(event);
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to handle partition change event: {}", event, e);
|
||||
}
|
||||
} else {
|
||||
log.info("Application event ignored due to invalid sequence number ({} > {}). Event: {}", lastProcessedSequenceNumber, event.getSequenceNumber(), event);
|
||||
}
|
||||
}
|
||||
|
||||
protected abstract void onJCPPApplicationEvent(T event);
|
||||
|
||||
protected boolean filterApplicationEvent(T event) {
|
||||
return true;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,18 @@
|
||||
/**
|
||||
* 抖音关注:程序员三丙
|
||||
* 知识星球:https://t.zsxq.com/j9b21
|
||||
*/
|
||||
package sanbing.jcpp.infrastructure.queue.discovery.event;
|
||||
|
||||
import lombok.Getter;
|
||||
|
||||
public class OtherServiceShutdownEvent extends JCPPApplicationEvent {
|
||||
|
||||
@Getter
|
||||
private final String serviceId;
|
||||
|
||||
public OtherServiceShutdownEvent(Object source, String serviceId) {
|
||||
super(source);
|
||||
this.serviceId = serviceId;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,45 @@
|
||||
/**
|
||||
* 抖音关注:程序员三丙
|
||||
* 知识星球:https://t.zsxq.com/j9b21
|
||||
*/
|
||||
package sanbing.jcpp.infrastructure.queue.discovery.event;
|
||||
|
||||
import lombok.Getter;
|
||||
import lombok.ToString;
|
||||
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
|
||||
import sanbing.jcpp.infrastructure.queue.discovery.QueueKey;
|
||||
import sanbing.jcpp.infrastructure.queue.discovery.ServiceType;
|
||||
|
||||
import java.io.Serial;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static sanbing.jcpp.infrastructure.queue.discovery.QueueKey.MAIN_QUEUE_NAME;
|
||||
|
||||
@ToString(callSuper = true)
|
||||
public class PartitionChangeEvent extends JCPPApplicationEvent {
|
||||
|
||||
@Serial
|
||||
private static final long serialVersionUID = -8731788167026510559L;
|
||||
|
||||
@Getter
|
||||
private final Map<QueueKey, Set<TopicPartitionInfo>> partitionsMap;
|
||||
|
||||
public PartitionChangeEvent(Object source, ServiceType serviceType, Map<QueueKey, Set<TopicPartitionInfo>> partitionsMap) {
|
||||
super(source);
|
||||
this.partitionsMap = partitionsMap;
|
||||
}
|
||||
|
||||
public Set<TopicPartitionInfo> getAppPartitions() {
|
||||
return getPartitionsByServiceTypeAndQueueName(ServiceType.APP, MAIN_QUEUE_NAME);
|
||||
}
|
||||
|
||||
private Set<TopicPartitionInfo> getPartitionsByServiceTypeAndQueueName(ServiceType serviceType, String queueName) {
|
||||
return partitionsMap.entrySet()
|
||||
.stream()
|
||||
.filter(entry -> serviceType.equals(entry.getKey().getType()) && queueName.equals(entry.getKey().getQueueName()))
|
||||
.flatMap(entry -> entry.getValue().stream())
|
||||
.collect(Collectors.toSet());
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user