云快充1.5.0 初始化

This commit is contained in:
3god
2024-10-08 09:38:54 +08:00
parent dea6774942
commit cb19b45919
297 changed files with 18020 additions and 28 deletions

View File

@@ -0,0 +1,110 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.protocol.forwarder;
import com.fasterxml.jackson.databind.node.ObjectNode;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.actuate.health.Health;
import sanbing.jcpp.infrastructure.queue.*;
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
import sanbing.jcpp.infrastructure.queue.discovery.PartitionProvider;
import sanbing.jcpp.infrastructure.queue.discovery.ServiceInfoProvider;
import sanbing.jcpp.infrastructure.queue.discovery.ServiceType;
import sanbing.jcpp.infrastructure.stats.MessagesStats;
import sanbing.jcpp.infrastructure.stats.StatsFactory;
import sanbing.jcpp.infrastructure.util.codec.ByteUtil;
import sanbing.jcpp.infrastructure.util.jackson.JacksonUtil;
import sanbing.jcpp.infrastructure.util.mdc.MDCUtils;
import sanbing.jcpp.infrastructure.util.trace.Tracer;
import sanbing.jcpp.infrastructure.util.trace.TracerContextUtil;
import sanbing.jcpp.proto.gen.ProtocolProto.UplinkQueueMessage;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiConsumer;
import static sanbing.jcpp.infrastructure.queue.common.QueueConstants.MSG_MD_PREFIX;
import static sanbing.jcpp.infrastructure.queue.common.QueueConstants.MSG_MD_TS;
import static sanbing.jcpp.infrastructure.util.trace.TracerContextUtil.JCPP_TRACER_ID;
import static sanbing.jcpp.infrastructure.util.trace.TracerContextUtil.JCPP_TRACER_ORIGIN;
/**
 * Base class for protocol uplink message forwarders.
 * <p>
 * A forwarder takes {@link UplinkQueueMessage}s produced by a protocol handler and
 * pushes them toward the application layer, either through the internal JCPP queue
 * ({@link #jcppForward}) or through a transport-specific channel implemented by a
 * subclass. Tracer context is propagated to the destination via message headers.
 *
 * @author baigod
 */
@Slf4j
public abstract class Forwarder {

    /** JSON key under which a failure description is reported to the consumer callback. */
    protected static final String ERROR = "error";

    // Health flag shared with subclasses (package-private on purpose);
    // expected to be flipped to false by destroy() implementations.
    AtomicBoolean healthy = new AtomicBoolean(true);

    @Getter
    private final String protocolName;

    protected MessagesStats forwarderMessagesStats;

    protected final PartitionProvider partitionProvider;

    protected final ServiceInfoProvider serviceInfoProvider;

    // True when the whole system runs as a single (monolith) deployment,
    // as reported by the ServiceInfoProvider at construction time.
    protected final boolean isMonolith;

    // Internal queue producer; initialized by subclasses that use jcppForward().
    protected QueueProducer<ProtoQueueMsg<UplinkQueueMessage>> producer;

    /**
     * @param protocolName        name of the owning protocol, used for stats tagging
     * @param statsFactory        factory used to create the per-protocol message stats
     * @param partitionProvider   resolves the target topic partition for a message key
     * @param serviceInfoProvider supplies deployment info (monolith vs. distributed)
     */
    public Forwarder(String protocolName, StatsFactory statsFactory, PartitionProvider partitionProvider, ServiceInfoProvider serviceInfoProvider) {
        this.protocolName = protocolName;
        this.partitionProvider = partitionProvider;
        this.serviceInfoProvider = serviceInfoProvider;
        this.forwarderMessagesStats = statsFactory.createMessagesStats("forwarderMessages", "protocol", protocolName);
        this.isMonolith = serviceInfoProvider.isMonolith();
    }

    /** @return current health of the underlying transport for actuator reporting */
    public abstract Health health();

    /** Releases transport resources; implementations should also clear {@link #healthy}. */
    public abstract void destroy();

    /**
     * Forwards a message through the internal JCPP queue producer, copying the current
     * tracer id / origin / timestamp into the message headers so the receiving side can
     * restore the trace context.
     *
     * @param topic    destination topic
     * @param key      message key used for partition resolution
     * @param msg      the uplink message to forward
     * @param consumer optional completion callback; receives {@code true} on success and
     *                 {@code false} plus an {@code error} field on failure (may be null)
     */
    protected void jcppForward(String topic, String key, UplinkQueueMessage msg, BiConsumer<Boolean, ObjectNode> consumer) {
        QueueMsgHeaders headers = new DefaultQueueMsgHeaders();
        Tracer currentTracer = TracerContextUtil.getCurrentTracer();
        headers.put(MSG_MD_PREFIX + JCPP_TRACER_ID, ByteUtil.stringToBytes(currentTracer.getTraceId()));
        headers.put(MSG_MD_PREFIX + JCPP_TRACER_ORIGIN, ByteUtil.stringToBytes(currentTracer.getOrigin()));
        headers.put(MSG_MD_PREFIX + MSG_MD_TS, ByteUtil.longToBytes(currentTracer.getTracerTs()));
        TopicPartitionInfo tpi = partitionProvider.resolve(ServiceType.APP, topic, key);
        producer.send(tpi, new ProtoQueueMsg<>(key, msg, headers), new QueueCallback() {
            @Override
            public void onSuccess(QueueMsgMetadata metadata) {
                // Re-establish the tracer context: the callback may run on a different thread.
                TracerContextUtil.newTracer(currentTracer.getTraceId(), currentTracer.getOrigin(), currentTracer.getTracerTs());
                MDCUtils.recordTracer();
                log.trace("单体消息转发成功 key:{}", key);
                if (consumer != null) {
                    consumer.accept(true, JacksonUtil.newObjectNode());
                }
            }

            @Override
            public void onFailure(Throwable t) {
                TracerContextUtil.newTracer(currentTracer.getTraceId(), currentTracer.getOrigin(), currentTracer.getTracerTs());
                MDCUtils.recordTracer();
                log.warn("单体消息转发异常", t);
                if (consumer != null) {
                    ObjectNode objectNode = JacksonUtil.newObjectNode();
                    // getName() yields "java.lang.Foo: msg"; bare getClass() prepended "class ".
                    objectNode.put(ERROR, t.getClass().getName() + ": " + t.getMessage());
                    // BUG FIX: previously accept(true, ...) — failures were reported to the
                    // caller as successes. Report false, matching KafkaForwarder.onComplete.
                    consumer.accept(false, objectNode);
                }
            }
        });
    }

    /**
     * Sends a message, invoking {@code consumer} with the outcome when the transport
     * completes (success flag plus a detail node).
     */
    public abstract void sendMessage(UplinkQueueMessage msg, BiConsumer<Boolean, ObjectNode> consumer);

    /** Fire-and-forget variant of {@link #sendMessage(UplinkQueueMessage, BiConsumer)}. */
    public abstract void sendMessage(UplinkQueueMessage msg);
}

View File

@@ -0,0 +1,204 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.protocol.forwarder;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.util.JsonFormat;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.config.SslConfigs;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.boot.actuate.health.Health;
import sanbing.jcpp.infrastructure.queue.discovery.PartitionProvider;
import sanbing.jcpp.infrastructure.queue.discovery.ServiceInfoProvider;
import sanbing.jcpp.infrastructure.queue.provider.AppQueueFactory;
import sanbing.jcpp.infrastructure.stats.StatsFactory;
import sanbing.jcpp.infrastructure.util.codec.ByteUtil;
import sanbing.jcpp.infrastructure.util.jackson.JacksonUtil;
import sanbing.jcpp.infrastructure.util.mdc.MDCUtils;
import sanbing.jcpp.infrastructure.util.trace.Tracer;
import sanbing.jcpp.infrastructure.util.trace.TracerContextUtil;
import sanbing.jcpp.proto.gen.ProtocolProto.UplinkQueueMessage;
import sanbing.jcpp.protocol.cfg.ForwarderCfg;
import sanbing.jcpp.protocol.cfg.KafkaCfg;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiConsumer;
import static sanbing.jcpp.infrastructure.queue.common.QueueConstants.MSG_MD_PREFIX;
import static sanbing.jcpp.infrastructure.queue.common.QueueConstants.MSG_MD_TS;
import static sanbing.jcpp.infrastructure.util.trace.TracerContextUtil.JCPP_TRACER_ID;
import static sanbing.jcpp.infrastructure.util.trace.TracerContextUtil.JCPP_TRACER_ORIGIN;
/**
 * Forwarder that publishes uplink protocol messages to Kafka.
 * <p>
 * When the service runs as a monolith, or when JCPP-managed partitioning is enabled
 * ({@code kafka.jcppPartition}), messages go through the internal JCPP queue producer
 * ({@link #jcppForward}). Otherwise a dedicated {@link KafkaProducer} is created from
 * the {@link KafkaCfg} and messages are sent directly, encoded either as JSON or as
 * raw protobuf bytes depending on {@code kafka.encoder}.
 *
 * @author baigod
 */
@Slf4j
public class KafkaForwarder extends Forwarder {

    // NOTE: this class intentionally reuses the inherited package-visible `healthy`
    // flag from Forwarder; do not redeclare it here (a local field would shadow it).

    private static final String OFFSET = "offset";
    private static final String PARTITION = "partition";
    private static final String TOPIC = "topic";

    private final KafkaCfg kafkaCfg;

    // True when partition selection is delegated to the JCPP queue layer.
    protected final boolean jcppPartition;

    // Only created in the direct-Kafka path; null when jcppForward() is used.
    private KafkaProducer<String, byte[]> kafkaProducer;

    public KafkaForwarder(String protocolName,
                          ForwarderCfg forwarderCfg,
                          StatsFactory statsFactory,
                          AppQueueFactory appQueueFactory,
                          PartitionProvider partitionProvider,
                          ServiceInfoProvider serviceInfoProvider) {
        super(protocolName, statsFactory, partitionProvider, serviceInfoProvider);
        this.kafkaCfg = forwarderCfg.getKafka();
        this.jcppPartition = kafkaCfg.isJcppPartition();
        if (this.isMonolith || jcppPartition) {
            this.producer = appQueueFactory.createProtocolUplinkMsgProducer(kafkaCfg.getTopic());
        } else {
            Properties properties = new Properties();
            properties.put(ProducerConfig.CLIENT_ID_CONFIG, "kafka-forwarder-" + getProtocolName() + "-" + serviceInfoProvider.getServiceId());
            properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaCfg.getBootstrapServers());
            properties.put(ProducerConfig.ACKS_CONFIG, kafkaCfg.getAcks());
            properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
            properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
            properties.put(ProducerConfig.RETRIES_CONFIG, kafkaCfg.getRetries());
            properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, kafkaCfg.getCompressionType());
            properties.put(ProducerConfig.BATCH_SIZE_CONFIG, kafkaCfg.getBatchSize());
            properties.put(ProducerConfig.LINGER_MS_CONFIG, kafkaCfg.getLingerMs());
            properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, kafkaCfg.getBufferMemory());
            if (this.kafkaCfg.getOtherProperties() != null) {
                this.kafkaCfg.getOtherProperties().forEach((k, v) -> {
                    // PEM material arrives with escaped newlines from flat config files;
                    // restore real line breaks before handing it to the Kafka client.
                    if (SslConfigs.SSL_KEYSTORE_CERTIFICATE_CHAIN_CONFIG.equals(k)
                            || SslConfigs.SSL_KEYSTORE_KEY_CONFIG.equals(k)
                            || SslConfigs.SSL_TRUSTSTORE_CERTIFICATES_CONFIG.equals(k)) {
                        v = v.replace("\\n", "\n");
                    }
                    properties.put(k, v);
                });
            }
            this.kafkaProducer = new KafkaProducer<>(properties);
        }
    }

    @Override
    public Health health() {
        if (healthy.get()) {
            return Health.up().withDetail("producer", "Kafka producer is healthy").build();
        } else {
            return Health.down().withDetail("producer", "Kafka producer is unhealthy").build();
        }
    }

    @Override
    public void destroy() {
        healthy.set(false);
        if (this.kafkaProducer != null) {
            try {
                this.kafkaProducer.close();
            } catch (Exception e) {
                log.error("Failed to close producer during destroy()", e);
            }
        }
    }

    @Override
    public void sendMessage(UplinkQueueMessage msg, BiConsumer<Boolean, ObjectNode> consumer) {
        String topic = kafkaCfg.getTopic();
        try {
            String messageKey = msg.getMessageKey();
            if (isMonolith || jcppPartition) {
                jcppForward(topic, messageKey, msg, consumer);
            } else {
                kafkaForward(topic, messageKey, msg, consumer);
            }
        } catch (Exception e) {
            // warn, not debug: a swallowed forwarding failure must be visible
            // (consistent with MemoryForwarder.sendMessage).
            log.warn("[{}] Failed to forward Kafka message: {}", getProtocolName(), msg, e);
        }
    }

    @Override
    public void sendMessage(UplinkQueueMessage msg) {
        sendMessage(msg, null);
    }

    /**
     * Sends the message directly through the dedicated KafkaProducer, carrying the
     * tracer context in record headers. Encoding (JSON vs. protobuf bytes) follows
     * {@code kafka.encoder}.
     */
    private void kafkaForward(String topic, String key, UplinkQueueMessage msg, BiConsumer<Boolean, ObjectNode> consumer) throws InvalidProtocolBufferException {
        Headers headers = new RecordHeaders();
        Tracer currentTracer = TracerContextUtil.getCurrentTracer();
        headers.add(new RecordHeader(MSG_MD_PREFIX + JCPP_TRACER_ID, ByteUtil.stringToBytes(currentTracer.getTraceId())));
        headers.add(new RecordHeader(MSG_MD_PREFIX + JCPP_TRACER_ORIGIN, ByteUtil.stringToBytes(currentTracer.getOrigin())));
        headers.add(new RecordHeader(MSG_MD_PREFIX + MSG_MD_TS, ByteUtil.longToBytes(currentTracer.getTracerTs())));
        if (kafkaCfg.getEncoder() == KafkaCfg.EncoderType.json) {
            String protoJson = JsonFormat.printer().print(msg);
            log.info("[{}] Kafka forwarder send json headers:{}, message:{}", getProtocolName(), headers, protoJson);
            kafkaProducer.send(new ProducerRecord<>(topic, null, null, key, ByteUtil.stringToBytes(protoJson), headers),
                    (metadata, e) -> logAndDoConsumer(consumer, metadata, e, currentTracer));
        } else {
            log.info("[{}] Kafka forwarder send protobuf headers:{}, message:{}", getProtocolName(), headers, msg);
            kafkaProducer.send(new ProducerRecord<>(topic, null, null, key, msg.toByteArray(), headers),
                    (metadata, e) -> logAndDoConsumer(consumer, metadata, e, currentTracer));
        }
    }

    // Restores tracer context on the producer I/O thread, logs the outcome and
    // relays it to the caller's consumer (if any).
    private void logAndDoConsumer(BiConsumer<Boolean, ObjectNode> consumer, RecordMetadata metadata, Exception e, Tracer currentTracer) {
        TracerContextUtil.newTracer(currentTracer.getTraceId(), currentTracer.getOrigin(), currentTracer.getTracerTs());
        MDCUtils.recordTracer();
        log.debug("Kafka 消息转发完成, success:{}", e == null);
        if (consumer != null) {
            onComplete(metadata, e, consumer);
        }
    }

    // Builds the result node handed to the consumer: topic/partition/offset on
    // success, plus an "error" field on failure.
    private void onComplete(RecordMetadata metadata, Exception e, BiConsumer<Boolean, ObjectNode> consumer) {
        ObjectNode objectNode = JacksonUtil.newObjectNode();
        // BUG FIX: on failure the callback may receive null metadata (older
        // kafka-clients versions); dereferencing it unconditionally threw an NPE
        // inside the callback and hid the real send failure.
        if (metadata != null) {
            objectNode.put(OFFSET, String.valueOf(metadata.offset()));
            objectNode.put(PARTITION, String.valueOf(metadata.partition()));
            objectNode.put(TOPIC, metadata.topic());
        }
        if (e != null) {
            // getName() yields "java.lang.Foo: msg"; bare getClass() prepended "class ".
            objectNode.put(ERROR, e.getClass().getName() + ": " + e.getMessage());
        }
        consumer.accept(e == null, objectNode);
    }
}

View File

@@ -0,0 +1,84 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.protocol.forwarder;
import com.fasterxml.jackson.databind.node.ObjectNode;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.actuate.health.Health;
import sanbing.jcpp.infrastructure.queue.discovery.PartitionProvider;
import sanbing.jcpp.infrastructure.queue.discovery.ServiceInfoProvider;
import sanbing.jcpp.infrastructure.queue.provider.AppQueueFactory;
import sanbing.jcpp.infrastructure.stats.StatsFactory;
import sanbing.jcpp.proto.gen.ProtocolProto.UplinkQueueMessage;
import sanbing.jcpp.protocol.cfg.ForwarderCfg;
import sanbing.jcpp.protocol.cfg.MemoryCfg;
import java.util.function.BiConsumer;
/**
 * Forwarder that publishes uplink protocol messages to the in-memory queue.
 * <p>
 * All traffic goes through the internal JCPP queue producer created from the
 * {@link MemoryCfg} topic; there is no external transport.
 *
 * @author baigod
 */
@Slf4j
public class MemoryForwarder extends Forwarder {

    private final MemoryCfg memoryCfg;

    public MemoryForwarder(String protocolName,
                           ForwarderCfg forwarderCfg,
                           StatsFactory statsFactory,
                           AppQueueFactory appQueueFactory,
                           PartitionProvider partitionProvider,
                           ServiceInfoProvider serviceInfoProvider) {
        super(protocolName, statsFactory, partitionProvider, serviceInfoProvider);
        this.memoryCfg = forwarderCfg.getMemory();
        this.producer = appQueueFactory.createProtocolUplinkMsgProducer(memoryCfg.getTopic());
    }

    @Override
    public Health health() {
        // Mirrors the shared `healthy` flag maintained by destroy().
        return healthy.get()
                ? Health.up().withDetail("producer", "Memory producer is healthy").build()
                : Health.down().withDetail("producer", "Memory producer is unhealthy").build();
    }

    @Override
    public void destroy() {
        healthy.set(false);
        if (producer == null) {
            return;
        }
        try {
            producer.stop();
        } catch (Exception e) {
            log.error("Failed to close producer during destroy()", e);
        }
    }

    @Override
    public void sendMessage(UplinkQueueMessage msg, BiConsumer<Boolean, ObjectNode> consumer) {
        String destinationTopic = memoryCfg.getTopic();
        String messageKey = msg.getMessageKey();
        try {
            jcppForward(destinationTopic, messageKey, msg, consumer);
        } catch (Exception e) {
            log.warn("[{}] Failed to forward Memory message: {}", getProtocolName(), msg, e);
        }
    }

    @Override
    public void sendMessage(UplinkQueueMessage msg) {
        // Fire-and-forget: no completion callback.
        sendMessage(msg, null);
    }
}