云快充1.5.0 初始化

This commit is contained in:
3god
2024-10-08 09:38:54 +08:00
parent dea6774942
commit cb19b45919
297 changed files with 18020 additions and 28 deletions

33
.gitignore vendored
View File

@@ -1,5 +1,10 @@
# Compiled class file
*.class
HELP.md
target/
!.mvn/wrapper/maven-wrapper.jar
!**/src/main/**/target/
!**/src/test/**/target/
# Log file
*.log
@@ -21,3 +26,31 @@
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*
### STS ###
.apt_generated
.classpath
.factorypath
.project
.settings
.springBeans
.sts4-cache
### IntelliJ IDEA ###
.idea
*.iws
*.iml
*.ipr
### NetBeans ###
/nbproject/private/
/nbbuild/
/dist/
/nbdist/
/.nb-gradle/
build/
!**/src/main/**/build/
!**/src/test/**/build/
### VS Code ###
.vscode/

View File

@@ -1,37 +1,27 @@
# JChargePointProtocol
#### 介绍
JAVA 充电桩协议库
###### 一个高性能、分布式、支持海量并发量的充电桩JAVA服务端,计划支持100种协议,为充电应用提供基础能力。
#### 软件架构
软件架构说明
<p>
<a target="_blank"
style="text-decoration: none !important;"
href="https://www.apache.org/licenses/LICENSE-2.0">
<img alt="GitHub License" src="https://img.shields.io/github/license/sanbing-java/JChargePointProtocol?style=flat&link=https%3A%2F%2Fwww.apache.org%2Flicenses%2FLICENSE-2.0" />
</a>
<a href="https://t.zsxq.com/j9b21">
<img alt="Static Badge" src="https://img.shields.io/badge/%E7%A4%BE%E7%BE%A4-%E4%B8%89%E4%B8%99%E5%BC%80%E6%BA%90%E7%A4%BE%E5%8C%BA-ff69b4?style=flat">
</a>
</p>
#### 安装教程
1. xxxx
2. xxxx
3. xxxx
#### 使用说明
1. xxxx
2. xxxx
3. xxxx
------------------------------
#### 参与贡献
1. Fork 本仓库
2. 新建 Feat_xxx 分支
3. 提交代码
4. 新建 Pull Request
1. Fork 本仓库
2. 新建 Feat_xxx 分支
3. 提交代码
4. 加入社群
5. 新建 Pull Request
#### 特技
1. 使用 Readme\_XXX.md 来支持不同的语言,例如 Readme\_en.md, Readme\_zh.md
2. Gitee 官方博客 [blog.gitee.com](https://blog.gitee.com)
3. 你可以 [https://gitee.com/explore](https://gitee.com/explore) 这个地址来了解 Gitee 上的优秀开源项目
4. [GVP](https://gitee.com/gvp) 全称是 Gitee 最有价值开源项目,是综合评定出的优秀开源项目
5. Gitee 官方提供的使用手册 [https://gitee.com/help](https://gitee.com/help)
6. Gitee 封面人物是一档用来展示 Gitee 会员风采的栏目 [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/)

40
docker/Dockerfile-App Normal file
View File

@@ -0,0 +1,40 @@
#
# Douyin: 程序员三丙
# Knowledge Planet: https://t.zsxq.com/j9b21
#
# Stage 1: build the whole multi-module project on top of the pre-warmed Maven base image.
FROM registry.cn-hangzhou.aliyuncs.com/sanbing/jcpp-base:latest AS base
WORKDIR /app
COPY . .
RUN mvn -U -B -T 0.8C clean install -DskipTests
# Stage 2: split the Spring Boot fat jar into layers for better image caching.
FROM registry.cn-hangzhou.aliyuncs.com/sanbing/openjdk:21-jdk-slim-bullseye AS builder
WORKDIR /app
COPY --from=base /app/jcpp-app-bootstrap/target/application.jar application.jar
RUN java -Djarmode=tools -jar application.jar extract --layers --destination extracted
# Stage 3: runtime image — copy layers in ascending change frequency (dependencies first).
FROM registry.cn-hangzhou.aliyuncs.com/sanbing/openjdk:21-jdk-slim-bullseye
WORKDIR /app
COPY --from=builder /app/extracted/dependencies/ ./
COPY --from=builder /app/extracted/spring-boot-loader/ ./
COPY --from=builder /app/extracted/snapshot-dependencies/ ./
COPY --from=builder /app/extracted/application/ ./
COPY --from=base /app/jcpp-app-bootstrap/target/conf ./config
COPY --from=base /app/docker/start.sh .
# mkdir -p creates parent directories, so only the leaf directories need listing.
RUN mkdir -p /var/log/sanbing/jcpp \
             /var/log/sanbing/accesslog \
             /var/log/sanbing/gc \
             /var/log/sanbing/heapdump && \
    chmod 700 -R /var/log/*
RUN chmod a+x *.sh && mv start.sh /usr/bin
# Fix: the port was listed twice ("EXPOSE 8080 8080"); once is enough.
EXPOSE 8080
CMD ["start.sh"]

11
docker/Dockerfile-Base Normal file
View File

@@ -0,0 +1,11 @@
#
# Douyin: 程序员三丙
# Knowledge Planet: https://t.zsxq.com/j9b21
#
# Base image used by the App/Protocol Dockerfiles (tagged jcpp-base:latest).
# Runs a throwaway build so the local Maven repository (~/.m2) is baked into the
# image, then deletes the sources — presumably a dependency-cache warm-up layer;
# TODO confirm that is the intent.
FROM registry.cn-hangzhou.aliyuncs.com/sanbing/mvn:3.9.9-jdk21 AS base
WORKDIR /app
COPY . .
RUN mvn -U -B -T 0.8C clean install -DskipTests
# Drop the build tree; only the populated Maven cache is kept in the image.
RUN rm -rf /app

View File

@@ -0,0 +1,40 @@
#
# Douyin: 程序员三丙
# Knowledge Planet: https://t.zsxq.com/j9b21
#
# Stage 1: build the whole multi-module project on top of the pre-warmed Maven base image.
FROM registry.cn-hangzhou.aliyuncs.com/sanbing/jcpp-base:latest AS base
WORKDIR /app
COPY . .
RUN mvn -U -B -T 0.8C clean install -DskipTests
# Stage 2: split the Spring Boot fat jar into layers for better image caching.
FROM registry.cn-hangzhou.aliyuncs.com/sanbing/openjdk:21-jdk-slim-bullseye AS builder
WORKDIR /app
COPY --from=base /app/jcpp-protocol-bootstrap/target/application.jar application.jar
RUN java -Djarmode=tools -jar application.jar extract --layers --destination extracted
# Stage 3: runtime image — copy layers in ascending change frequency (dependencies first).
FROM registry.cn-hangzhou.aliyuncs.com/sanbing/openjdk:21-jdk-slim-bullseye
WORKDIR /app
COPY --from=builder /app/extracted/dependencies/ ./
COPY --from=builder /app/extracted/spring-boot-loader/ ./
COPY --from=builder /app/extracted/snapshot-dependencies/ ./
COPY --from=builder /app/extracted/application/ ./
COPY --from=base /app/jcpp-protocol-bootstrap/target/conf ./config
COPY --from=base /app/docker/start.sh .
# mkdir -p creates parent directories, so only the leaf directories need listing.
RUN mkdir -p /var/log/sanbing/jcpp \
             /var/log/sanbing/accesslog \
             /var/log/sanbing/gc \
             /var/log/sanbing/heapdump && \
    chmod 700 -R /var/log/*
RUN chmod a+x *.sh && mv start.sh /usr/bin
# Fix: the port was listed twice ("EXPOSE 8081 8081"); once is enough.
EXPOSE 8081
CMD ["start.sh"]

View File

@@ -0,0 +1,57 @@
#
# Douyin: 程序员三丙
# Knowledge Planet: https://t.zsxq.com/j9b21
#
networks:
  sanbing-network:
    driver: bridge
    name: sanbing-network
    ipam:
      config:
        - subnet: 10.10.0.0/24

services:
  zookeeper:
    image: registry.cn-hangzhou.aliyuncs.com/sanbing/zookeeper:3.9
    restart: always
    networks:
      - sanbing-network
    ports:
      - "2181:2181"
    environment:
      # Quoted: a bare `true` is a YAML boolean; env values should be strings.
      ALLOW_ANONYMOUS_LOGIN: "true"

  kafka:
    image: registry.cn-hangzhou.aliyuncs.com/sanbing/kafka:3.7.1
    restart: always
    depends_on:
      - zookeeper
    networks:
      - sanbing-network
    ports:
      - "9092:9092"
    env_file:
      - kafka.env

  kafka-exporter:
    image: registry.cn-hangzhou.aliyuncs.com/sanbing/kafka-exporter:latest
    restart: always
    depends_on:
      - kafka
    networks:
      - sanbing-network
    ports:
      - "9308:9308"
    command:
      - '--kafka.server=kafka:9092'

  # Example application with its queue type switched to Kafka via queue-kafka.env.
  example:
    image: example:latest
    restart: always
    depends_on:
      - kafka
    networks:
      - sanbing-network
    ports:
      - "8080:8080"
    env_file:
      - queue-kafka.env

View File

@@ -0,0 +1,90 @@
#
# Douyin: 程序员三丙
# Knowledge Planet: https://t.zsxq.com/j9b21
#
networks:
  sanbing-network:
    driver: bridge
    name: sanbing-network
    ipam:
      config:
        - subnet: 10.10.0.0/24

services:
  # Six-node Redis cluster; redis-node-5 bootstraps it (REDIS_CLUSTER_CREATOR).
  redis-node-0:
    image: registry.cn-hangzhou.aliyuncs.com/sanbing/redis-cluster:7.4
    restart: always
    networks:
      - sanbing-network
    # Shared credentials/topology — reused by nodes 1-4 via the YAML anchor below.
    environment: &redis_node_env
      - 'REDIS_PASSWORD=sanbing'
      - 'REDISCLI_AUTH=sanbing'
      - 'REDIS_NODES=redis-node-0 redis-node-1 redis-node-2 redis-node-3 redis-node-4 redis-node-5'

  redis-node-1:
    image: registry.cn-hangzhou.aliyuncs.com/sanbing/redis-cluster:7.4
    restart: always
    networks:
      - sanbing-network
    depends_on:
      - redis-node-0
    environment: *redis_node_env

  redis-node-2:
    image: registry.cn-hangzhou.aliyuncs.com/sanbing/redis-cluster:7.4
    restart: always
    networks:
      - sanbing-network
    depends_on:
      - redis-node-1
    environment: *redis_node_env

  redis-node-3:
    image: registry.cn-hangzhou.aliyuncs.com/sanbing/redis-cluster:7.4
    restart: always
    networks:
      - sanbing-network
    depends_on:
      - redis-node-2
    environment: *redis_node_env

  redis-node-4:
    image: registry.cn-hangzhou.aliyuncs.com/sanbing/redis-cluster:7.4
    restart: always
    networks:
      - sanbing-network
    depends_on:
      - redis-node-3
    environment: *redis_node_env

  # Waits for all other nodes, then creates the cluster with one replica per master.
  redis-node-5:
    image: registry.cn-hangzhou.aliyuncs.com/sanbing/redis-cluster:7.4
    restart: always
    networks:
      - sanbing-network
    depends_on:
      - redis-node-0
      - redis-node-1
      - redis-node-2
      - redis-node-3
      - redis-node-4
    environment:
      - 'REDIS_PASSWORD=sanbing'
      - 'REDISCLI_AUTH=sanbing'
      - 'REDIS_NODES=redis-node-0 redis-node-1 redis-node-2 redis-node-3 redis-node-4 redis-node-5'
      - 'REDIS_CLUSTER_REPLICAS=1'
      - 'REDIS_CLUSTER_CREATOR=yes'

View File

@@ -0,0 +1,49 @@
#
# Douyin: 程序员三丙
# Knowledge Planet: https://t.zsxq.com/j9b21
#
networks:
  sanbing-network:
    driver: bridge
    name: sanbing-network
    ipam:
      config:
        - subnet: 10.10.0.0/24

services:
  # Primary node of the replicated pair.
  redis-master:
    image: registry.cn-hangzhou.aliyuncs.com/sanbing/redis:7.4
    restart: always
    networks:
      - sanbing-network
    environment:
      - 'REDIS_REPLICATION_MODE=master'
      - 'REDIS_PASSWORD=sanbing'

  # Replica following redis-master.
  redis-slave:
    image: registry.cn-hangzhou.aliyuncs.com/sanbing/redis:7.4
    restart: always
    networks:
      - sanbing-network
    depends_on:
      - redis-master
    environment:
      - 'REDIS_REPLICATION_MODE=slave'
      - 'REDIS_MASTER_HOST=redis-master'
      - 'REDIS_MASTER_PASSWORD=sanbing'
      - 'REDIS_PASSWORD=sanbing'

  # Sentinel monitoring the master set "mymaster" for automatic failover.
  redis-sentinel:
    image: registry.cn-hangzhou.aliyuncs.com/sanbing/redis-sentinel:7.4
    restart: always
    networks:
      - sanbing-network
    depends_on:
      - redis-master
      - redis-slave
    environment:
      - 'REDIS_MASTER_HOST=redis-master'
      - 'REDIS_MASTER_SET=mymaster'
      - 'REDIS_SENTINEL_PASSWORD=sanbing'
      - 'REDIS_MASTER_PASSWORD=sanbing'

View File

@@ -0,0 +1,23 @@
#
# Douyin: 程序员三丙
# Knowledge Planet: https://t.zsxq.com/j9b21
#
networks:
  sanbing-network:
    driver: bridge
    name: sanbing-network
    ipam:
      config:
        - subnet: 10.10.0.0/24

services:
  # Single standalone Redis instance, password-protected, exposed on 6379.
  redis:
    image: registry.cn-hangzhou.aliyuncs.com/sanbing/redis:7.4
    restart: always
    networks:
      - sanbing-network
    ports:
      - '6379:6379'
    environment:
      - 'REDIS_PASSWORD=sanbing'

14
docker/kafka.env Normal file
View File

@@ -0,0 +1,14 @@
# Kafka broker configuration (Bitnami-style image: KAFKA_CFG_* maps to server.properties keys).
# NOTE(review): this file configures ZooKeeper mode (KAFKA_CFG_ZOOKEEPER_CONNECT);
# KAFKA_CFG_NODE_ID is a KRaft-mode property and is presumably ignored here — confirm.
KAFKA_CFG_NODE_ID=0
ALLOW_PLAINTEXT_LISTENER=yes
KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
# INSIDE listener (9093) for inter-broker traffic, OUTSIDE (9092) for clients;
# OUTSIDE is advertised as "kafka:9092" for containers on the compose network.
KAFKA_CFG_LISTENERS=INSIDE://:9093,OUTSIDE://:9092
KAFKA_CFG_ADVERTISED_LISTENERS=INSIDE://:9093,OUTSIDE://kafka:9092
KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true
KAFKA_CFG_INTER_BROKER_LISTENER_NAME=INSIDE
# Aggressive retention for a dev stack: 1 GiB per partition, 5-minute retention, delete policy.
KAFKA_CFG_LOG_RETENTION_BYTES=1073741824
# NOTE(review): the broker-level key is log.segment.bytes (KAFKA_CFG_LOG_SEGMENT_BYTES);
# "segment.bytes" is a topic-level name — verify this variable actually takes effect.
KAFKA_CFG_SEGMENT_BYTES=268435456
KAFKA_CFG_LOG_RETENTION_MS=300000
KAFKA_CFG_LOG_CLEANUP_POLICY=delete

2
docker/queue-kafka.env Normal file
View File

@@ -0,0 +1,2 @@
# Switches the application's queue implementation from in-memory to Kafka;
# KAFKA_SERVERS must resolve on the compose network (see the kafka service).
QUEUE_TYPE=kafka
KAFKA_SERVERS=kafka:9092

View File

@@ -0,0 +1,121 @@
--
-- Douyin: 程序员三丙
-- Knowledge Planet: https://t.zsxq.com/j9b21
--

-- Platform users / owners. Credentials are stored as a JSON document.
CREATE TABLE IF NOT EXISTS jcpp_user
(
    id               uuid                                not null
        constraint owner_pkey
            primary key,
    created_time     timestamp default CURRENT_TIMESTAMP not null,
    additional_info  jsonb,
    status           varchar(16)                         not null,
    user_name        varchar(255)                        not null,
    user_credentials jsonb                               not null,
    version          int       default 1
);
CREATE UNIQUE INDEX IF NOT EXISTS uni_user_name
    on jcpp_user (user_name);

-- Charging stations; owner_id references jcpp_user.id (no FK constraint declared).
CREATE TABLE IF NOT EXISTS jcpp_station
(
    id              uuid                                not null
        constraint station_pkey
            primary key,
    created_time    timestamp default CURRENT_TIMESTAMP not null,
    additional_info jsonb,
    station_name    varchar(255)                        not null,
    station_code    varchar(255)                        not null,
    owner_id        uuid                                not null,
    longitude       double precision                    not null,
    latitude        double precision                    not null,
    owner_type      varchar(16)                         not null,
    province        varchar(255),
    city            varchar(255),
    county          varchar(255),
    address         varchar(255),
    status          varchar(16)                         not null,
    version         int       default 1
);
CREATE UNIQUE INDEX IF NOT EXISTS uni_station_code
    on jcpp_station (station_code);

-- Charging piles; protocol names the wire protocol the pile speaks (e.g. yunkuaichong).
CREATE TABLE IF NOT EXISTS jcpp_pile
(
    id              uuid                                not null
        constraint pile_pkey
            primary key,
    created_time    timestamp default CURRENT_TIMESTAMP not null,
    additional_info jsonb,
    pile_name       varchar(255)                        not null,
    pile_code       varchar(255)                        not null,
    protocol        varchar(255)                        not null,
    station_id      uuid                                not null,
    owner_id        uuid                                not null,
    owner_type      varchar(16)                         not null,
    brand           varchar(255),
    model           varchar(255),
    manufacturer    varchar(255),
    status          varchar(16)                         not null,
    type            varchar(16)                         not null,
    version         int       default 1
);
CREATE UNIQUE INDEX IF NOT EXISTS uni_pile_code
    on jcpp_pile (pile_code);

-- Charging guns (connectors) attached to a pile.
-- NOTE(review): additional_info is varchar(255) here but jsonb on every other
-- table — looks unintentional; confirm before changing the column type.
CREATE TABLE IF NOT EXISTS jcpp_gun
(
    id                      uuid                                not null
        primary key,
    created_time            timestamp default CURRENT_TIMESTAMP not null,
    additional_info         varchar(255),
    gun_no                  varchar(255)                        not null,
    gun_name                varchar(255)                        not null,
    gun_code                varchar(255)                        not null,
    station_id              uuid                                not null,
    pile_id                 uuid                                not null,
    owner_id                uuid                                not null,
    owner_type              varchar(16)                         not null,
    run_status              varchar(16)                         not null,
    run_status_updated_time timestamp                           not null,
    opt_status              varchar(16)                         not null,
    version                 int       default 1
);
CREATE UNIQUE INDEX IF NOT EXISTS uni_gun_code
    on jcpp_gun (gun_code);

-- Charging orders. settlement_amount is in minor currency units (bigint);
-- electricity_quantity keeps 9 fractional digits.
CREATE TABLE IF NOT EXISTS jcpp_order
(
    id                   uuid                                not null
        primary key,
    internal_order_no    varchar(255)                        not null,
    external_order_no    varchar(255)                        not null,
    -- Renamed from pile_order_No for consistent snake_case; identical to
    -- PostgreSQL's case-folded form of the original unquoted identifier.
    pile_order_no        varchar(255)                        not null,
    created_time         timestamp default CURRENT_TIMESTAMP not null,
    additional_info      jsonb,
    updated_time         timestamp,
    cancelled_time       timestamp,
    status               varchar(16)                         not null,
    type                 varchar(16)                         not null,
    creator_id           uuid                                not null,
    station_id           uuid                                not null,
    pile_id              uuid                                not null,
    gun_id               uuid                                not null,
    plate_no             varchar(64),
    settlement_amount    bigint         default 0            not null,
    settlement_details   jsonb,
    electricity_quantity numeric(16, 9) default 0            not null
);
CREATE UNIQUE INDEX IF NOT EXISTS uni_internal_order_no
    on jcpp_order (internal_order_no);
CREATE UNIQUE INDEX IF NOT EXISTS uni_external_order_no
    on jcpp_order (external_order_no);

21
docker/start.sh Normal file
View File

@@ -0,0 +1,21 @@
#!/bin/bash
#
# Douyin: 程序员三丙
# Knowledge Planet: https://t.zsxq.com/j9b21
#
# Container entrypoint: assembles JVM options and execs the Spring Boot jar.
# `exec` replaces the shell so the JVM runs as PID 1 and receives signals directly.
echo "Starting Server ..."
# Container-aware heap sizing, GC logging to /var/log/sanbing/gc, heap dumps on OOM,
# G1 tuning and a log4j2 config override. Do NOT insert comment lines inside the
# quoted value below — the backslash continuations are part of a single string.
export JAVA_APP_OPTS="-XX:+UseContainerSupport -XX:InitialRAMPercentage=10 -XX:MaxRAMPercentage=70 \
-Xlog:gc*,heap*,age*,safepoint=debug:file=/var/log/sanbing/gc/gc.log:time,uptime,level,tags:filecount=10,filesize=10M \
-XX:+HeapDumpOnOutOfMemoryError \
-XX:HeapDumpPath=/var/log/sanbing/heapdump/ \
-XX:+UseTLAB -XX:+ResizeTLAB -XX:+PerfDisableSharedMem -XX:+UseCondCardMark \
-XX:+UseG1GC -XX:MaxGCPauseMillis=500 -XX:+UseStringDeduplication -XX:+ParallelRefProcEnabled -XX:MaxTenuringThreshold=10 \
-Xss512k -XX:MaxDirectMemorySize=128M -XX:G1ReservePercent=20 \
-XX:-OmitStackTraceInFastThrow \
-Dlogging.config=/app/config/log4j2.xml"
# Uncomment to enable remote debugging on port 8000 (suspend=n: do not wait for debugger).
#export JAVA_OPTS_EXTEND="-Xdebug -Xrunjdwp:transport=dt_socket,address=0.0.0.0:8000,server=y,suspend=n"
# The option variables are intentionally unquoted so they word-split into separate JVM arguments.
exec java $JAVA_APP_OPTS $JAVA_OPTS_EXTEND $JAVA_OPTS -Dnetworkaddress.cache.ttl=60 -jar /app/application.jar

View File

@@ -0,0 +1,84 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
抖音关注:程序员三丙
知识星球https://t.zsxq.com/j9b21
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>sanbing</groupId>
<artifactId>jcpp-parent</artifactId>
<version>1.0.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>jcpp-app-bootstrap</artifactId>
<packaging>jar</packaging>
<name>JChargePointProtocol Application Bootstrap Module</name>
<description>App引导程序</description>
<properties>
<main.dir>${basedir}/..</main.dir>
<disruptor.version>3.4.4</disruptor.version>
</properties>
<dependencies>
<dependency>
<groupId>sanbing</groupId>
<artifactId>jcpp-app</artifactId>
</dependency>
<dependency>
<groupId>sanbing</groupId>
<artifactId>jcpp-protocol-yunkuaichong</artifactId>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>junit-jupiter</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<finalName>application</finalName>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<configuration>
<skip>false</skip>
<layout>ZIP</layout>
<mainClass>sanbing.jcpp.JCPPServerApplication</mainClass>
<excludeDevtools>true</excludeDevtools>
<layers>
<enabled>true</enabled>
<configuration>${project.basedir}/src/layers.xml</configuration>
</layers>
</configuration>
<executions>
<execution>
<goals>
<goal>repackage</goal>
<goal>build-info</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-resources-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
</plugin>
</plugins>
</build>
</project>

View File

@@ -0,0 +1,33 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
抖音关注:程序员三丙
知识星球https://t.zsxq.com/j9b21
-->
<layers xmlns="http://www.springframework.org/schema/boot/layers"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/boot/layers
https://www.springframework.org/schema/boot/layers/layers-2.7.xsd">
<application>
<into layer="spring-boot-loader">
<include>org/springframework/boot/loader/**</include>
</into>
<into layer="application" />
</application>
<dependencies>
<into layer="application">
<includeModuleDependencies />
</into>
<into layer="snapshot-dependencies">
<include>*:*:*SNAPSHOT</include>
</into>
<into layer="dependencies" />
</dependencies>
<layerOrder>
<layer>dependencies</layer>
<layer>spring-boot-loader</layer>
<layer>snapshot-dependencies</layer>
<layer>application</layer>
</layerOrder>
</layers>

View File

@@ -0,0 +1,39 @@
/**
 * Douyin: 程序员三丙
 * Knowledge Planet: https://t.zsxq.com/j9b21
 */
package sanbing.jcpp;

import org.springframework.boot.Banner;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.scheduling.annotation.EnableScheduling;

import java.util.Arrays;

/**
 * Application entry point. Boots the Spring context with async and scheduling
 * support enabled, logging the banner instead of printing it to the console.
 *
 * @author baigod
 */
@SpringBootApplication
@EnableAsync
@EnableScheduling
public class JCPPServerApplication {

    private static final String SPRING_CONFIG_NAME_KEY = "--spring.config.name";
    private static final String DEFAULT_SPRING_CONFIG_PARAM = SPRING_CONFIG_NAME_KEY + "=" + "app-service";

    public static void main(String[] args) {
        new SpringApplicationBuilder(JCPPServerApplication.class)
                .bannerMode(Banner.Mode.LOG)
                .run(updateArguments(args));
    }

    /**
     * Appends the default "--spring.config.name=app-service" argument unless the
     * caller already supplied a --spring.config.name option; the input array is
     * never mutated.
     */
    private static String[] updateArguments(String[] args) {
        boolean configNamePresent = Arrays.stream(args)
                .anyMatch(arg -> arg.startsWith(SPRING_CONFIG_NAME_KEY));
        if (configNamePresent) {
            return args;
        }
        String[] augmented = Arrays.copyOf(args, args.length + 1);
        augmented[args.length] = DEFAULT_SPRING_CONFIG_PARAM;
        return augmented;
    }
}

View File

@@ -0,0 +1,214 @@
server:
address: "${HTTP_BIND_ADDRESS:0.0.0.0}"
port: "${HTTP_BIND_PORT:8080}"
undertow:
buffer-size: "${SERVER_UNDERTOW_BUFFER_SIZE:16384}"
directBuffers: "${SERVER_UNDERTOW_DIRECT_BUFFERS:true}"
threads:
io: "${SERVER_UNDERTOW_THREADS_IO:4}"
worker: "${SERVER_UNDERTOW_THREADS_WORKER:128}"
max-http-post-size: "${SERVER_UNDERTOW_MAX_HTTP_POST_SIZE:10MB}"
no-request-timeout: "${SERVER_UNDERTOW_NO_REQUEST_TIMEOUT:10000}"
accesslog:
enabled: true
pattern: "%t %a %r %s (%D ms)"
dir: /var/log/sanbing/accesslog
options:
server:
record-request-start-time: true
spring:
application:
name: "${SPRING_APPLICATION_NAME:java-charge-point-server}"
datasource:
driver-class-name: "${SPRING_DRIVER_CLASS_NAME:org.postgresql.Driver}"
url: "${SPRING_DATASOURCE_URL:jdbc:postgresql://10.102.12.102:30135/jcpp}"
username: "${SPRING_DATASOURCE_USERNAME:postgres}"
password: "${SPRING_DATASOURCE_PASSWORD:postgres}"
hikari:
leak-detection-threshold: "${SPRING_DATASOURCE_HIKARI_LEAK_DETECTION_THRESHOLD:0}"
maximum-pool-size: "${SPRING_DATASOURCE_MAXIMUM_POOL_SIZE:16}"
register-mbeans: "${SPRING_DATASOURCE_HIKARI_REGISTER_MBEANS:false}"
mybatis-plus:
type-handlers-package: sanbing.jcpp.app.dal.config.ibatis.typehandlers
management:
endpoints:
web:
exposure:
include: '${METRICS_ENDPOINTS_EXPOSE:prometheus,health}'
endpoint:
health:
show-details: always
metrics:
enabled: "${METRICS_ENABLED:true}"
timer:
percentiles: "${METRICS_TIMER_PERCENTILES:0.5}"
# 应用程序服务注册中心配置
zk:
enabled: "${ZOOKEEPER_ENABLED:true}"
url: "${ZOOKEEPER_URL:zookeeper:2181}"
retry-interval-ms: "${ZOOKEEPER_RETRY_INTERVAL_MS:3000}"
connection-timeout-ms: "${ZOOKEEPER_CONNECTION_TIMEOUT_MS:3000}"
session-timeout-ms: "${ZOOKEEPER_SESSION_TIMEOUT_MS:3000}"
zk-dir: "${ZOOKEEPER_NODES_DIR:/jcpp}"
recalculate-delay: "${ZOOKEEPER_RECALCULATE_DELAY_MS:0}"
# 队列配置
queue:
# 可选 kafka、memory
type: "${QUEUE_TYPE:memory}"
partitions:
hash_function_name: "${QUEUE_PARTITIONS_HASH_FUNCTION_NAME:murmur3_128}" # murmur3_32, murmur3_128 or sha256
in_memory:
stats:
print-interval-ms: "${QUEUE_IN_MEMORY_STATS_PRINT_INTERVAL_MS:60000}"
kafka:
bootstrap-servers: "${KAFKA_SERVERS:kafka:9092}"
ssl:
enabled: "${KAFKA_SSL_ENABLED:false}"
truststore-location: "${KAFKA_SSL_TRUSTSTORE_LOCATION:}"
truststore-password: "${KAFKA_SSL_TRUSTSTORE_PASSWORD:}"
keystore-location: "${KAFKA_SSL_KEYSTORE_LOCATION:}"
keystore-password: "${KAFKA_SSL_KEYSTORE_PASSWORD:}"
key-password: "${KAFKA_SSL_KEY_PASSWORD:}"
acks: "${KAFKA_ACKS:1}"
retries: "${KAFKA_RETRIES:1}"
compression-type: "${KAFKA_COMPRESSION_TYPE:lz4}" # none, gzip, snappy, lz4, zstd
batch-size: "${KAFKA_BATCH_SIZE:1048576}"
linger-ms: "${KAFKA_LINGER_MS:1}"
max-request-size: "${KAFKA_MAX_REQUEST_SIZE:1048576}"
max-in-flight-requests-per-connection: "${KAFKA_MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION:5}"
buffer-memory: "${BUFFER_MEMORY:33554432}"
replication-factor: "${QUEUE_KAFKA_REPLICATION_FACTOR:1}"
max-poll-interval-ms: "${QUEUE_KAFKA_MAX_POLL_INTERVAL_MS:300000}"
max-poll-records: "${QUEUE_KAFKA_MAX_POLL_RECORDS:10240}"
max-partition-fetch-bytes: "${QUEUE_KAFKA_MAX_PARTITION_FETCH_BYTES:16777216}"
fetch-max-bytes: "${QUEUE_KAFKA_FETCH_MAX_BYTES:134217728}"
request-timeout-ms: "${QUEUE_KAFKA_REQUEST_TIMEOUT_MS:30000}"
session-timeout-ms: "${QUEUE_KAFKA_SESSION_TIMEOUT_MS:10000}"
auto-offset-reset: "${QUEUE_KAFKA_AUTO_OFFSET_RESET:earliest}"
other-inline: "${QUEUE_KAFKA_OTHER_PROPERTIES:}"
topic-properties:
app: "${QUEUE_KAFKA_APP_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:1;min.insync.replicas:1}"
consumer-stats:
enabled: "${QUEUE_KAFKA_CONSUMER_STATS_ENABLED:true}"
print-interval-ms: "${QUEUE_KAFKA_CONSUMER_STATS_MIN_PRINT_INTERVAL_MS:60000}"
kafka-response-timeout-ms: "${QUEUE_KAFKA_CONSUMER_STATS_RESPONSE_TIMEOUT_MS:1000}"
app:
topic: "${QUEUE_APP_TOPIC:protocol_uplink}"
poll-interval: "${QUEUE_APP_POLL_INTERVAL_MS:5}"
pack-processing-timeout: "${QUEUE_APP_PACK_PROCESSING_TIMEOUT_MS:2000}"
consumer-per-partition: "${QUEUE_APP_CONSUMER_PER_PARTITION:true}"
partitions: "${QUEUE_APP_PARTITIONS:10}"
# 可选 protobuf推荐、json需要跟..forwarder.kafka.encoder保持一致
decoder: "${QUEUE_APP_DECODER:protobuf}"
stats:
enabled: "${QUEUE_APP_STATS_ENABLED:true}"
print-interval-ms: "${QUEUE_APP_STATS_PRINT_INTERVAL_MS:60000}"
# 应用程序缓存配置
cache:
type: "${CACHE_TYPE:caffeine}" # caffeine or redis
specs:
piles:
timeToLiveInMinutes: "${CACHE_SPECS_PILES_TTL:15}"
maxSize: "${CACHE_SPECS_PILES_MAX_SIZE:1000}"
pileSessions:
timeToLiveInMinutes: "${CACHE_SPECS_PILE_SESSIONS_TTL:1440}"
maxSize: "${CACHE_SPECS_PILE_SESSIONS_MAX_SIZE:100000}"
redis:
connection:
type: "${REDIS_CONNECTION_TYPE:standalone}"
standalone:
host: "${REDIS_HOST:redis}"
port: "${REDIS_PORT:6379}"
useDefaultClientConfig: "${REDIS_USE_DEFAULT_CLIENT_CONFIG:true}"
clientName: "${REDIS_CLIENT_NAME:standalone}"
commandTimeout: "${REDIS_CLIENT_COMMAND_TIMEOUT:30000}"
shutdownTimeout: "${REDIS_CLIENT_SHUTDOWN_TIMEOUT:1000}"
readTimeout: "${REDIS_CLIENT_READ_TIMEOUT:60000}"
usePoolConfig: "${REDIS_CLIENT_USE_POOL_CONFIG:false}"
cluster:
nodes: "${REDIS_NODES:redis-node-0:6379,redis-node-1:6379,redis-node-2:6379,redis-node-3:6379,redis-node-4:6379,redis-node-5:6379}"
max-redirects: "${REDIS_MAX_REDIRECTS:12}"
useDefaultPoolConfig: "${REDIS_USE_DEFAULT_POOL_CONFIG:false}"
sentinel:
master: "${REDIS_MASTER:mymaster}"
sentinels: "${REDIS_SENTINELS:redis-sentinel:26379}"
password: "${REDIS_SENTINEL_PASSWORD:sanbing}"
useDefaultPoolConfig: "${REDIS_USE_DEFAULT_POOL_CONFIG:false}"
db: "${REDIS_DB:0}"
password: "${REDIS_PASSWORD:sanbing}"
pool_config:
maxTotal: "${REDIS_POOL_CONFIG_MAX_TOTAL:128}"
maxIdle: "${REDIS_POOL_CONFIG_MAX_IDLE:128}"
minIdle: "${REDIS_POOL_CONFIG_MIN_IDLE:16}"
testOnBorrow: "${REDIS_POOL_CONFIG_TEST_ON_BORROW:false}"
testOnReturn: "${REDIS_POOL_CONFIG_TEST_ON_RETURN:false}"
testWhileIdle: "${REDIS_POOL_CONFIG_TEST_WHILE_IDLE:true}"
minEvictableMs: "${REDIS_POOL_CONFIG_MIN_EVICTABLE_MS:60000}"
evictionRunsMs: "${REDIS_POOL_CONFIG_EVICTION_RUNS_MS:30000}"
maxWaitMills: "${REDIS_POOL_CONFIG_MAX_WAIT_MS:60000}"
numberTestsPerEvictionRun: "${REDIS_POOL_CONFIG_NUMBER_TESTS_PER_EVICTION_RUN:3}"
blockWhenExhausted: "${REDIS_POOL_CONFIG_BLOCK_WHEN_EXHAUSTED:true}"
evictTtlInMs: "${REDIS_EVICT_TTL_MS:60000}"
service:
# 服务类型:纯协议解析前置 - protocol纯应用后端 - app单体服务(包含protocol和app) - monolith
type: "${SERVICE_TYPE:monolith}"
# 可自定义的服务ID如果不指定则默认为HOSTNAME
id: "${SERVICE_ID:}"
protocols:
sessions:
default-inactivity-timeout-in-sec: "${PROTOCOLS_SESSIONS_DEFAULT_INACTIVITY_TIMEOUT_IN_SEC:600}"
default-state-check-interval-in-sec: "${PROTOCOLS_SESSIONS_DEFAULT_STATE_CHECK_INTERVAL_IN_SEC:60}"
yunkuaichongV150:
enabled: "${PROTOCOLS_YUNKUAICHONGV150_ENABLED:true}"
listener:
tcp:
bind-address: "${PROTOCOLS_YUNKUAICHONGV150_LISTENER_TCP_BIND_ADDRESS:0.0.0.0}"
bind-port: "${PROTOCOLS_YUNKUAICHONGV150_LISTENER_TCP_BIND_PORT:38001}"
boss-group-thread_count: "${PROTOCOLS_YUNKUAICHONGV150_LISTENER_TCP_BOSS_GROUP_THREADS:4}"
worker-group-thread-count: "${PROTOCOLS_YUNKUAICHONGV150_LISTENER_TCP_WORKER_GROUP_THREADS:16}"
so-keep-alive: "${PROTOCOLS_YUNKUAICHONGV150_LISTENER_TCP_SO_KEEPALIVE:true}"
so-backlog: "${PROTOCOLS_YUNKUAICHONGV150_LISTENER_TCP_SO_BACKLOG:128}"
so-rcvbuf: "${PROTOCOLS_YUNKUAICHONGV150_LISTENER_TCP_SO_RCVBUF:65536}"
so-sndbuf: "${PROTOCOLS_YUNKUAICHONGV150_LISTENER_TCP_SO_SNDBUF:65536}"
nodelay: "${PROTOCOLS_YUNKUAICHONGV150_LISTENER_TCP_NODELAY:true}"
handler:
idle-timeout-seconds: "${PROTOCOLS_YUNKUAICHONGV150_LISTENER_TCP_HANDLER_IDLE_TIMEOUT_SECONDS:600}"
max_connections: "${PROTOCOLS_YUNKUAICHONGV150_LISTENER_TCP_HANDLER_MAX_CONNECTIONS:100000}"
# 默认为二进制类型的拆包器
# 可选JSON类型的拆包器 "${PROTOCOLS_YUNKUAICHONGV150_NETTY_HANDLER_BINARY_CONFIGURATION:type:JSON}"
# 可选纯文本类型的拆包器 "${PROTOCOLS_YUNKUAICHONGV150_NETTY_HANDLER_BINARY_CONFIGURATION:type:TEXT;maxFrameLength:128;stripDelimiter:true;messageSeparator:null;charsetName:UTF-8}"
configuration: "${PROTOCOLS_YUNKUAICHONGV150_NETTY_HANDLER_BINARY_CONFIGURATION:type:BINARY;decoder:sanbing.jcpp.protocol.listener.tcp.decoder.JCPPLengthFieldBasedFrameDecoder;byteOrder:LITTLE_ENDIAN;head:68;lengthFieldOffset:1;lengthFieldLength:1;lengthAdjustment:2;initialBytesToStrip:0}"
forwarder:
# 如果是单体服务可选kafka、memory未来计划扩展RocketMQ, GRpc、REST
type: "${PROTOCOLS_YUNKUAICHONGV150_FORWARD_TYPE:memory}"
memory:
topic: "${PROTOCOLS_YUNKUAICHONGV150_FORWARD_MEMORY_TOPIC:protocol_uplink}"
kafka:
topic: "${PROTOCOLS_YUNKUAICHONGV150_FORWARD_KAFKA_TOPIC:protocol_uplink}"
jcpp-partition: "${PROTOCOLS_YUNKUAICHONGV150_FORWARD_KAFKA_JCPP_PARTITION:true}" # 是否利用JCPP的分片框架
# 以下配置只有在service.type为protocol时且jcpp-partition为false时才生效
bootstrap-servers: "${PROTOCOLS_YUNKUAICHONGV150_FORWARD_KAFKA_SERVERS:10.102.12.102:9092}"
acks: "${PROTOCOLS_YUNKUAICHONGV150_FORWARD_KAFKA_ACKS:1}"
# # 可选 protobuf推荐、json
encoder: "${PROTOCOLS_YUNKUAICHONGV150_FORWARD_KAFKA_ENCODER:protobuf}"
retries: "${PROTOCOLS_YUNKUAICHONGV150_FORWARD_KAFKA_RETRIES:1}"
compression-type: "${PROTOCOLS_YUNKUAICHONGV150_FORWARD_KAFKA_COMPRESSION_TYPE:lz4}" # none, gzip, snappy, lz4, zstd
batch-size: "${PROTOCOLS_YUNKUAICHONGV150_FORWARD_KAFKA_BATCH_SIZE:16384}"
linger-ms: "${PROTOCOLS_YUNKUAICHONGV150_FORWARD_KAFKA_LINGER_MS:0}"
buffer-memory: "${PROTOCOLS_YUNKUAICHONGV150_FORWARD_BUFFER_MEMORY:33554432}"
other-properties: "${PROTOCOLS_YUNKUAICHONGV150_FORWARD_QUEUE_KAFKA_OTHER_PROPERTIES:}"
thread-pool:
sharding:
hash_function_name: "${THREAD_POOL_SHARDING_HASH_FUNCTION_NAME:murmur3_128}" # murmur3_32, murmur3_128 or sha256
parallelism: "${THREAD_POOL_SHARDING_PARALLELISM:128}"
stats-print-interval-ms: "${THREAD_POOL_SHARDING_STATS_PRINT_INTERVAL_MS:10000}"

View File

@@ -0,0 +1,12 @@
___ ________ ________ ________
|\ \|\ ____\|\ __ \|\ __ \
\ \ \ \ \___|\ \ \|\ \ \ \|\ \
__ \ \ \ \ \ \ \ ____\ \ ____\
|\ \\_\ \ \ \____\ \ \___|\ \ \___|
\ \________\ \_______\ \__\ \ \__\
\|________|\|_______|\|__| \|__|
===================================================
:: ${application.title} :: ${application.formatted-version}
===================================================

View File

@@ -0,0 +1,56 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration status="INFO" monitorInterval="30">
<properties>
<Property name="LOG_DIR">/var/log/sanbing/jcpp</Property>
<Property name="LOG_PATTERN">%d{yyyy-MM-dd HH:mm:ss:SSS} [%X{TRACE_ID}] [%t] %p %c{1} %m%n%throwable</Property>
</properties>
<Appenders>
<Console name="CONSOLE" target="SYSTEM_OUT" follow="true">
<PatternLayout pattern="${LOG_PATTERN}"/>
</Console>
<RollingFile name="ROLLING_FILE" fileName="${LOG_DIR}/jcpp-app.log"
filePattern="${LOG_DIR}/jcpp-app.%d{yyyy-MM-dd}-%i.log"
immediateFlush="false">
<PatternLayout pattern="${LOG_PATTERN}"/>
<Policies>
<TimeBasedTriggeringPolicy modulate="true" interval="1"/>
</Policies>
<DefaultRolloverStrategy>
<Delete basePath="${LOG_DIR}" maxDepth="1">
<IfFileName glob="*.log"/>
<IfAccumulatedFileSize exceeds="10GB"/>
</Delete>
</DefaultRolloverStrategy>
</RollingFile>
</Appenders>
<Loggers>
<logger name="org.springframework" level="INFO" />
<AsyncRoot level="INFO" includeLocation="true">
<AppenderRef ref="CONSOLE"/>
<AppenderRef ref="ROLLING_FILE"/>
</AsyncRoot>
<AsyncLogger name="sanbing.jcpp" level="INFO" additivity="false" includeLocation="false">
<AppenderRef ref="CONSOLE"/>
<AppenderRef ref="ROLLING_FILE"/>
</AsyncLogger>
<AsyncLogger name="sanbing.jcpp.protocol" level="${env:PROTOCOLS_LOG_LEVEL:-TRACE}"
additivity="false" includeLocation="false">
<AppenderRef ref="CONSOLE"/>
<AppenderRef ref="ROLLING_FILE"/>
</AsyncLogger>
</Loggers>
</configuration>

View File

@@ -0,0 +1,27 @@
/**
 * Douyin: 程序员三丙
 * Knowledge Planet: https://t.zsxq.com/j9b21
 */
package sanbing.jcpp;

import org.junit.jupiter.api.MethodOrderer;
import org.junit.jupiter.api.TestMethodOrder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ActiveProfiles;

/**
 * Shared base class for integration tests: boots the full application on a
 * random port under the "test" profile and runs test methods in @Order order.
 *
 * @author baigod
 */
@ActiveProfiles("test")
@SpringBootTest(classes = JCPPServerApplication.class, webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT)
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
public class AbstractTestBase {

    static {
        // Pin the config file name to the production one (app-service.yml)
        // before the Spring context is created.
        System.setProperty("spring.config.name", "app-service");
    }

    // Logger named after the concrete subclass.
    protected final Logger log = LoggerFactory.getLogger(this.getClass());
}

View File

@@ -0,0 +1,72 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.mapper;
import com.baomidou.mybatisplus.core.toolkit.Wrappers;
import jakarta.annotation.Resource;
import org.junit.jupiter.api.Test;
import sanbing.jcpp.AbstractTestBase;
import sanbing.jcpp.app.dal.config.ibatis.enums.GunOptStatusEnum;
import sanbing.jcpp.app.dal.config.ibatis.enums.GunRunStatusEnum;
import sanbing.jcpp.app.dal.config.ibatis.enums.OwnerTypeEnum;
import sanbing.jcpp.app.dal.entity.Gun;
import sanbing.jcpp.infrastructure.util.jackson.JacksonUtil;
import java.time.LocalDateTime;
import java.util.UUID;
import static sanbing.jcpp.app.dal.mapper.PileMapperTest.NORMAL_PILE_ID;
import static sanbing.jcpp.app.dal.mapper.StationMapperTest.NORMAL_STATION_ID;
import static sanbing.jcpp.app.dal.mapper.UserMapperTest.NORMAL_USER_ID;
/**
 * CRUD smoke test for {@code GunMapper}: wipes the gun table, then inserts one
 * gun fixture per known pile id and reads each row back.
 *
 * @author baigod
 */
public class GunMapperTest extends AbstractTestBase {
    // Stable gun ids so reruns upsert the same rows instead of accumulating data.
    static final UUID[] NORMAL_GUN_ID = new UUID[]{
            UUID.fromString("8f1ffb5b-e536-4f2b-8cd0-31f7d0348a44"),
            UUID.fromString("ae256617-b747-4110-b27a-00773e03bed1"),
            UUID.fromString("d15dbb29-ea2f-4094-b448-dff853e9275f"),
            UUID.fromString("b4a2de24-d7ff-4828-a0d8-2429a6253f9c"),
            UUID.fromString("f505f7e2-9e1c-4251-8f7f-9a8eae84372a"),
            UUID.fromString("0c5bab7b-786b-4e05-ab26-618c3f5a6086"),
            UUID.fromString("2db4ad92-e353-4ac2-a2b0-942cb778eca6"),
            UUID.fromString("203833e7-0a44-4f1c-935e-cd43e6dbbf46"),
            UUID.fromString("3f3a61e9-de55-4177-9b4e-3a1d8c529890"),
            UUID.fromString("cf1a8970-5aa9-4636-a76e-d6bcf98b4a07")
    };
    @Resource
    GunMapper gunMapper;
    @Test
    void curdTest() {
        // Start from an empty table so the test is deterministic across runs.
        gunMapper.delete(Wrappers.lambdaQuery());
        for (int idx = 0; idx < NORMAL_PILE_ID.length; idx++) {
            Gun gun = newGun(NORMAL_PILE_ID[idx], NORMAL_GUN_ID[idx], idx + 1);
            gunMapper.insertOrUpdate(gun);
            log.info("{}", gunMapper.selectById(NORMAL_GUN_ID[idx]));
        }
    }
    // Builds a gun fixture bound to the given pile; seq is 1-based and only varies the gunCode suffix.
    private Gun newGun(UUID pileId, UUID gunId, int seq) {
        return Gun.builder()
                .id(gunId)
                .createdTime(LocalDateTime.now())
                .additionalInfo(JacksonUtil.newObjectNode())
                .gunNo("01")
                .gunName("三丙的1号枪")
                .gunCode("20231212000001-" + seq)
                .stationId(NORMAL_STATION_ID)
                .pileId(pileId)
                .ownerId(NORMAL_USER_ID)
                .ownerType(OwnerTypeEnum.C)
                .runStatus(GunRunStatusEnum.IDLE)
                .runStatusUpdatedTime(LocalDateTime.now())
                .optStatus(GunOptStatusEnum.AVAILABLE)
                .build();
    }
}

View File

@@ -0,0 +1,66 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.mapper;
import cn.hutool.core.math.Money;
import cn.hutool.core.util.IdUtil;
import com.baomidou.mybatisplus.core.toolkit.Wrappers;
import jakarta.annotation.Resource;
import org.apache.commons.lang3.RandomStringUtils;
import org.junit.jupiter.api.Test;
import sanbing.jcpp.AbstractTestBase;
import sanbing.jcpp.app.dal.config.ibatis.enums.OrderStatusEnum;
import sanbing.jcpp.app.dal.config.ibatis.enums.OrderTypeEnum;
import sanbing.jcpp.app.dal.entity.Order;
import sanbing.jcpp.infrastructure.util.jackson.JacksonUtil;
import java.math.BigDecimal;
import java.time.LocalDateTime;
import java.util.UUID;
import static sanbing.jcpp.app.dal.mapper.GunMapperTest.NORMAL_GUN_ID;
import static sanbing.jcpp.app.dal.mapper.PileMapperTest.NORMAL_PILE_ID;
import static sanbing.jcpp.app.dal.mapper.StationMapperTest.NORMAL_STATION_ID;
import static sanbing.jcpp.app.dal.mapper.UserMapperTest.NORMAL_USER_ID;
/**
 * CRUD smoke test for {@code OrderMapper}: clears the order table, inserts one
 * charging order wired to the fixture user/station/pile/gun ids, and reads it back.
 *
 * @author baigod
 */
public class OrderMapperTest extends AbstractTestBase {
    @Resource
    OrderMapper orderMapper;
    @Test
    void testOrderMapper() {
        // Empty the table first so reruns stay deterministic.
        orderMapper.delete(Wrappers.lambdaQuery());
        Order order = Order.builder()
                .id(UUID.randomUUID())
                // Snowflake ids for the internal/external order numbers; pile order no is random digits.
                .internalOrderNo(IdUtil.getSnowflake(1, 1).nextIdStr())
                .externalOrderNo(IdUtil.getSnowflake(1, 1).nextIdStr())
                .pileOrderNo(RandomStringUtils.randomNumeric(16))
                .createdTime(LocalDateTime.now())
                .additionalInfo(JacksonUtil.newObjectNode())
                .updatedTime(LocalDateTime.now())
                .cancelledTime(null)
                .status(OrderStatusEnum.IN_CHARGING)
                .type(OrderTypeEnum.CHARGE)
                .creatorId(NORMAL_USER_ID)
                .stationId(NORMAL_STATION_ID)
                .pileId(NORMAL_PILE_ID[0])
                .gunId(NORMAL_GUN_ID[0])
                .plateNo("浙A88888")
                // Money(100D).getCent() stores the settlement amount as cents (long).
                .settlementAmount(new Money(100D).getCent())
                .settlementDetails(JacksonUtil.newObjectNode())
                .electricityQuantity(new BigDecimal("100"))
                .build();
        orderMapper.insertOrUpdate(order);
        log.info("{}", orderMapper.selectById(order.getId()));
    }
}

View File

@@ -0,0 +1,74 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.mapper;
import com.baomidou.mybatisplus.core.toolkit.Wrappers;
import jakarta.annotation.Resource;
import org.junit.jupiter.api.Test;
import sanbing.jcpp.AbstractTestBase;
import sanbing.jcpp.app.dal.config.ibatis.enums.OwnerTypeEnum;
import sanbing.jcpp.app.dal.config.ibatis.enums.PileStatusEnum;
import sanbing.jcpp.app.dal.config.ibatis.enums.PileTypeEnum;
import sanbing.jcpp.app.dal.entity.Pile;
import sanbing.jcpp.infrastructure.util.jackson.JacksonUtil;
import java.text.DecimalFormat;
import java.time.LocalDateTime;
import java.util.UUID;
import static sanbing.jcpp.app.dal.mapper.StationMapperTest.NORMAL_STATION_ID;
import static sanbing.jcpp.app.dal.mapper.UserMapperTest.NORMAL_USER_ID;
/**
 * CRUD smoke test for {@code PileMapper}: clears the pile table, inserts ten
 * fixture piles with stable ids, and reads each one back.
 *
 * @author baigod
 */
public class PileMapperTest extends AbstractTestBase {
    // Stable pile ids shared with the gun/order tests via static import.
    static final UUID[] NORMAL_PILE_ID = new UUID[]{
            UUID.fromString("fd7b3f60-db6c-4347-bff3-3c922985b95c"),
            UUID.fromString("fa621927-6458-4e09-9666-99c52230db2b"),
            UUID.fromString("afec0b0a-ad82-4923-97da-70e4a5d5e2c6"),
            UUID.fromString("3e45ae30-2848-4d5a-a7b8-bd8504a6713d"),
            UUID.fromString("349ff65e-ce8e-435a-928b-52fdef2828f2"),
            UUID.fromString("e60d5b2d-8014-4f8f-b828-e207e6cf4a8f"),
            UUID.fromString("8f010829-b505-4e57-8b93-6bdf981ac4e1"),
            UUID.fromString("081842e2-9e74-4abb-aeab-b2cbfeb7a335"),
            UUID.fromString("f04cf40a-0fbe-40f7-a07c-5b663ad68e98"),
            UUID.fromString("ec522751-e1d3-4117-a887-3bdae7892369")
    };
    @Resource
    PileMapper pileMapper;
    @Test
    void curdTest() {
        // Start from an empty table so the test is deterministic across runs.
        pileMapper.delete(Wrappers.lambdaQuery());
        // Formatter is loop-invariant; zero-pads the pile code suffix ("01".."10").
        DecimalFormat twoDigits = new DecimalFormat("00");
        for (int seq = 1; seq <= NORMAL_PILE_ID.length; seq++) {
            UUID pileId = NORMAL_PILE_ID[seq - 1];
            Pile pile = Pile.builder()
                    .id(pileId)
                    .createdTime(LocalDateTime.now())
                    .additionalInfo(JacksonUtil.newObjectNode())
                    .pileName(String.format("三丙家的%d号充电桩", seq))
                    .pileCode("202312120000" + twoDigits.format(seq))
                    .protocol("yunkuaichongV150")
                    .stationId(NORMAL_STATION_ID)
                    .ownerId(NORMAL_USER_ID)
                    .ownerType(OwnerTypeEnum.C)
                    .brand("星星")
                    .model("10A")
                    .manufacturer("星星")
                    .status(PileStatusEnum.IDLE)
                    .type(PileTypeEnum.AC)
                    .build();
            pileMapper.insertOrUpdate(pile);
            log.info("{}", pileMapper.selectById(pileId));
        }
    }
}

View File

@@ -0,0 +1,55 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.mapper;
import com.baomidou.mybatisplus.core.toolkit.Wrappers;
import jakarta.annotation.Resource;
import org.junit.jupiter.api.Test;
import sanbing.jcpp.AbstractTestBase;
import sanbing.jcpp.app.dal.config.ibatis.enums.OwnerTypeEnum;
import sanbing.jcpp.app.dal.config.ibatis.enums.StationStatusEnum;
import sanbing.jcpp.app.dal.entity.Station;
import sanbing.jcpp.infrastructure.util.jackson.JacksonUtil;
import java.time.LocalDateTime;
import java.util.UUID;
import static sanbing.jcpp.app.dal.mapper.UserMapperTest.NORMAL_USER_ID;
/**
 * CRUD smoke test for {@code StationMapper}: clears the station table, inserts
 * the single fixture station other tests reference, and reads it back.
 *
 * @author baigod
 */
class StationMapperTest extends AbstractTestBase {
    // Stable station id referenced by the pile/gun/order tests via static import.
    static final UUID NORMAL_STATION_ID = UUID.fromString("07d80c81-fe99-4a1f-a6aa-dc4d798b5626");
    @Resource
    StationMapper stationMapper;
    @Test
    void curdTest() {
        // Empty the table first so reruns stay deterministic.
        stationMapper.delete(Wrappers.lambdaQuery());
        Station station = Station.builder()
                .id(NORMAL_STATION_ID)
                .createdTime(LocalDateTime.now())
                .additionalInfo(JacksonUtil.newObjectNode())
                .stationName("三丙家专属充电站")
                .stationCode("S20241001001")
                .ownerId(NORMAL_USER_ID)
                // WGS-ish coordinates stored as floats on the entity.
                .longitude(120.107936F)
                .latitude(30.267014F)
                .ownerType(OwnerTypeEnum.C)
                .province("浙江省")
                .city("杭州市")
                .county("西湖区")
                .address("西溪路552-1号")
                .status(StationStatusEnum.OPERATIONAL)
                .build();
        stationMapper.insertOrUpdate(station);
        log.info("{}", stationMapper.selectById(NORMAL_STATION_ID));
    }
}

View File

@@ -0,0 +1,44 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.mapper;
import com.baomidou.mybatisplus.core.toolkit.Wrappers;
import jakarta.annotation.Resource;
import org.junit.jupiter.api.Test;
import sanbing.jcpp.AbstractTestBase;
import sanbing.jcpp.app.dal.config.ibatis.enums.UserStatusEnum;
import sanbing.jcpp.app.dal.entity.User;
import sanbing.jcpp.infrastructure.util.jackson.JacksonUtil;
import java.time.LocalDateTime;
import java.util.UUID;
/**
 * CRUD smoke test for {@code UserMapper}: clears the user table, inserts the
 * single fixture user other tests reference, and reads it back.
 *
 * @author baigod
 */
class UserMapperTest extends AbstractTestBase {
    // Stable user id referenced by the station/pile/gun/order tests via static import.
    static final UUID NORMAL_USER_ID = UUID.fromString("21cbf909-a23a-4396-840a-f34061f59f95");
    @Resource
    private UserMapper userMapper;
    @Test
    void curdTest() {
        // Empty the table first so reruns stay deterministic.
        userMapper.delete(Wrappers.lambdaQuery());
        User user = User.builder()
                .id(NORMAL_USER_ID)
                .createdTime(LocalDateTime.now())
                .additionalInfo(JacksonUtil.newObjectNode())
                .status(UserStatusEnum.ENABLE)
                .userName("sanbing")
                // Empty JSON object as a placeholder for credentials.
                .userCredentials(JacksonUtil.newObjectNode())
                .build();
        userMapper.insertOrUpdate(user);
        log.info("{}", userMapper.selectById(NORMAL_USER_ID));
    }
}

View File

@@ -0,0 +1,98 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import jakarta.annotation.Resource;
import org.junit.jupiter.api.Order;
import org.junit.jupiter.api.Test;
import org.springframework.data.redis.core.*;
import sanbing.jcpp.AbstractTestBase;
import java.time.Duration;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CountDownLatch;
import java.util.stream.IntStream;
/**
 * Exercises the Redis cache wiring with both the blocking and reactive templates:
 * bulk KV writes, hash writes, and their reactive equivalents. Methods run in
 * the declared {@code @Order} so the hash written in (2) precedes the reads.
 */
class RedisCacheConfigurationTest extends AbstractTestBase {
    @Resource
    RedisTemplate<String, Object> redisTemplate;
    @Resource
    ReactiveRedisTemplate<String, Object> reactiveRedisTemplate;
    // Number of entries written per test; keep the runs short but non-trivial.
    final static int testTimes = 10_000;
    // Single shared hash key used by both hash tests.
    final static String hashKey = "hashKey";
    @Test
    @Order(1)
    void kvTest() {
        // Write testTimes string values with a 1-minute TTL, then read one back
        // and print its runtime type (verifies the configured serializer).
        ValueOperations<String, Object> valueOperations = redisTemplate.opsForValue();
        IntStream.range(0, testTimes).forEach(i -> {
            String key = "field:" + i;
            String value = "value:" + i;
            valueOperations.set(key, value, Duration.ofMinutes(1));
        });
        Object o = valueOperations.get("field:1000");
        System.out.println(Objects.requireNonNull(o).getClass() + " : " + o);
    }
    @Test
    @Order(2)
    void hashTest() {
        // Fill one hash with testTimes fields, expire the whole hash, then fetch
        // all entries in one round trip (a potentially slow "big key" read).
        HashOperations<String, Object, Object> hashOperations = redisTemplate.opsForHash();
        IntStream.range(0, testTimes).forEach(i -> {
            String key = "field:" + i;
            String value = "value:" + i;
            hashOperations.put(hashKey, key, value);
        });
        redisTemplate.expire(hashKey, Duration.ofMinutes(1));
        Map<Object, Object> slowKey = hashOperations.entries(hashKey);
        System.out.println("map size:" + slowKey.size());
    }
    @Test
    @Order(3)
    void reactiveKVTest() {
        // Same as kvTest but via the reactive template; block() per write keeps
        // the loop sequential and simple for a smoke test.
        ReactiveValueOperations<String, Object> valueOperations = reactiveRedisTemplate.opsForValue();
        IntStream.range(0, testTimes).forEach(i -> {
            String key = "field:" + i;
            String value = "value:" + i;
            valueOperations.set(key, value, Duration.ofMinutes(1)).block();
        });
        Object o = valueOperations.get("field:1000").block();
        System.out.println(Objects.requireNonNull(o).getClass() + " : " + o);
    }
    @Test
    @Order(4)
    void reactiveHashTest() throws InterruptedException {
        // Reactive hash writes, then an async entries() read; the latch keeps the
        // test alive until the subscription delivers the collected list.
        ReactiveHashOperations<String, Object, Object> hashOperations = reactiveRedisTemplate.opsForHash();
        IntStream.range(0, testTimes).forEach(i -> {
            String key = "field:" + i;
            String value = "value:" + i;
            hashOperations.put(hashKey, key, value).block();
        });
        redisTemplate.expire(hashKey, Duration.ofMinutes(1));
        CountDownLatch latch = new CountDownLatch(1);
        hashOperations.entries(hashKey).collectList().subscribe(entries -> {
            System.out.println("size:" + entries.size());
            latch.countDown();
        });
        latch.await();
    }
}

View File

@@ -0,0 +1,2 @@
redis.connection.type=cluster
redis.cluster.nodes=10.102.12.101:30700,10.102.12.101:32027,10.102.12.101:30767,10.102.12.101:30250,10.102.12.101:30612,10.102.12.101:32303

56
jcpp-app/pom.xml Normal file
View File

@@ -0,0 +1,56 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
抖音关注:程序员三丙
知识星球https://t.zsxq.com/j9b21
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<parent>
<groupId>sanbing</groupId>
<artifactId>jcpp-parent</artifactId>
<version>1.0.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>jcpp-app</artifactId>
<packaging>jar</packaging>
<name>JChargePointProtocol App Module</name>
<description>应用模块</description>
<properties>
<main.dir>${basedir}/..</main.dir>
</properties>
<dependencies>
<dependency>
<groupId>sanbing</groupId>
<artifactId>jcpp-protocol-api</artifactId>
</dependency>
<dependency>
<groupId>sanbing</groupId>
<artifactId>jcpp-infrastructure-queue</artifactId>
</dependency>
<dependency>
<groupId>sanbing</groupId>
<artifactId>jcpp-infrastructure-cache</artifactId>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-jdbc</artifactId>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-spring-boot3-starter</artifactId>
</dependency>
</dependencies>
</project>

View File

@@ -0,0 +1,76 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.config;
import com.baomidou.mybatisplus.annotation.DbType;
import com.baomidou.mybatisplus.extension.plugins.MybatisPlusInterceptor;
import com.baomidou.mybatisplus.extension.plugins.inner.PaginationInnerInterceptor;
import com.zaxxer.hikari.HikariDataSource;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.autoconfigure.data.redis.RedisAutoConfiguration;
import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.transaction.support.TransactionTemplate;
import javax.sql.DataSource;
/**
 * Data-access layer wiring: Hikari datasource bound from Spring properties,
 * JDBC templates, a transaction template, and the MyBatis-Plus pagination
 * interceptor for PostgreSQL. Redis auto-configuration is excluded here
 * (the cache module configures Redis itself).
 */
@Configuration
@EnableAutoConfiguration(exclude = {RedisAutoConfiguration.class})
@MapperScan({"sanbing.jcpp.app.dal.mapper"})
public class DalConfig {
    // Binds spring.datasource.* (url, username, password, ...) for the pool below.
    @Bean
    @ConfigurationProperties("spring.datasource")
    public DataSourceProperties dataSourceProperties() {
        return new DataSourceProperties();
    }
    // Hikari pool; spring.datasource.hikari.* tunes the pool itself.
    @Primary
    @ConfigurationProperties(prefix = "spring.datasource.hikari")
    @Bean
    public DataSource dataSource(@Qualifier("dataSourceProperties") DataSourceProperties dataSourceProperties) {
        return dataSourceProperties.initializeDataSourceBuilder().type(HikariDataSource.class).build();
    }
    @Primary
    @Bean
    public JdbcTemplate jdbcTemplate(@Qualifier("dataSource") DataSource dataSource) {
        return new JdbcTemplate(dataSource);
    }
    @Primary
    @Bean
    public NamedParameterJdbcTemplate namedParameterJdbcTemplate(@Qualifier("dataSource") DataSource dataSource) {
        return new NamedParameterJdbcTemplate(dataSource);
    }
    // NOTE(review): no DataSourceTransactionManager bean is declared in this class;
    // presumably Spring Boot auto-configuration supplies it — verify.
    @Primary
    @Bean
    public TransactionTemplate transactionTemplate(DataSourceTransactionManager transactionManager) {
        TransactionTemplate transactionTemplate = new TransactionTemplate(transactionManager);
        // READ_COMMITTED + REQUIRED as the default programmatic-transaction settings.
        transactionTemplate.setIsolationLevel(TransactionTemplate.ISOLATION_READ_COMMITTED);
        transactionTemplate.setPropagationBehavior(TransactionTemplate.PROPAGATION_REQUIRED);
        return transactionTemplate;
    }
    // Pagination interceptor with the PostgreSQL dialect for MyBatis-Plus page queries.
    @Bean
    @Primary
    public MybatisPlusInterceptor mybatisPlusInterceptor() {
        MybatisPlusInterceptor mybatisPlusInterceptor = new MybatisPlusInterceptor();
        PaginationInnerInterceptor paginationInnerInterceptor = new PaginationInnerInterceptor();
        paginationInnerInterceptor.setDbType(DbType.POSTGRE_SQL);
        mybatisPlusInterceptor.addInnerInterceptor(paginationInnerInterceptor);
        return mybatisPlusInterceptor;
    }
}

View File

@@ -0,0 +1,22 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.config.ibatis.enums;
import com.baomidou.mybatisplus.annotation.IEnum;
/**
 * Administrative (operational) status of a charging gun. Persisted as the
 * constant name string via MyBatis-Plus {@code IEnum}.
 *
 * @author baigod
 */
public enum GunOptStatusEnum implements IEnum<String> {
    AVAILABLE, // Available for use
    IN_MAINTENANCE, // Under maintenance
    OUT_OF_SERVICE, // Taken out of service
    RESERVED; // Reserved (booked)
    @Override
    public String getValue() {
        // The stored database value is the constant name itself.
        return name();
    }
}

View File

@@ -0,0 +1,27 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.config.ibatis.enums;
import com.baomidou.mybatisplus.annotation.IEnum;
/**
 * Runtime status of a charging gun (charge/discharge lifecycle). Persisted as
 * the constant name string via MyBatis-Plus {@code IEnum}.
 *
 * @author baigod
 */
public enum GunRunStatusEnum implements IEnum<String> {
    IDLE, // Idle
    INSERTED, // Gun plugged in
    CHARGING, // Charging in progress
    CHARGE_COMPLETE, // Charging finished
    DISCHARGE_READY, // Ready to discharge
    DISCHARGING, // Discharging in progress
    DISCHARGE_COMPLETE, // Discharging finished
    RESERVED, // Reserved
    FAULT; // Fault
    @Override
    public String getValue() {
        // The stored database value is the constant name itself.
        return name();
    }
}

View File

@@ -0,0 +1,24 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.config.ibatis.enums;
import com.baomidou.mybatisplus.annotation.IEnum;
/**
 * Lifecycle status of a charging order. Persisted as the constant name string
 * via MyBatis-Plus {@code IEnum}.
 */
public enum OrderStatusEnum implements IEnum<String> {
    PENDING, // Created, not yet charging
    IN_CHARGING, // Charging in progress
    COMPLETED, // Finished normally
    CANCELLED, // Cancelled before completion
    TERMINATED, // Stopped abnormally
    FAILED, // Failed to start/settle
    REFUNDED; // Payment refunded
    @Override
    public String getValue() {
        // The stored database value is the constant name itself.
        return name();
    }
}

View File

@@ -0,0 +1,21 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.config.ibatis.enums;
import com.baomidou.mybatisplus.annotation.IEnum;
/**
 * Direction of energy flow for an order: charge or discharge (V2G). Persisted
 * as the constant name string via MyBatis-Plus {@code IEnum}.
 *
 * @author baigod
 */
public enum OrderTypeEnum implements IEnum<String> {
    CHARGE, // Vehicle charging
    DISCHARGE; // Vehicle discharging (feed-back)
    @Override
    public String getValue() {
        // The stored database value is the constant name itself.
        return name();
    }
}

View File

@@ -0,0 +1,21 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.config.ibatis.enums;
import com.baomidou.mybatisplus.annotation.IEnum;
/**
 * Owner category of an asset: C (consumer), B (business), G — presumably
 * government/grid; confirm against business docs. Persisted as the constant
 * name string via MyBatis-Plus {@code IEnum}.
 *
 * @author baigod
 */
public enum OwnerTypeEnum implements IEnum<String> {
    C, // Consumer (to-C)
    B, // Business (to-B)
    G; // NOTE(review): meaning not established in this file — verify
    @Override
    public String getValue() {
        // The stored database value is the constant name itself.
        return name();
    }
}

View File

@@ -0,0 +1,24 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.config.ibatis.enums;
import com.baomidou.mybatisplus.annotation.IEnum;
/**
 * Runtime status of a charging pile. Persisted as the constant name string via
 * MyBatis-Plus {@code IEnum}.
 *
 * @author baigod
 */
public enum PileStatusEnum implements IEnum<String> {
    IDLE, // Idle
    WORKING, // In operation (charging)
    FAULT, // Fault
    MAINTENANCE, // Under maintenance
    OFFLINE, // Offline / unreachable
    ;
    @Override
    public String getValue() {
        // The stored database value is the constant name itself.
        return name();
    }
}

View File

@@ -0,0 +1,21 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.config.ibatis.enums;
import com.baomidou.mybatisplus.annotation.IEnum;
/**
 * Current type of a charging pile. Persisted as the constant name string via
 * MyBatis-Plus {@code IEnum}.
 *
 * @author baigod
 */
public enum PileTypeEnum implements IEnum<String> {
    AC, // AC charging pile
    DC, // DC charging pile
    ;
    @Override
    public String getValue() {
        // The stored database value is the constant name itself.
        return name();
    }
}

View File

@@ -0,0 +1,26 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.config.ibatis.enums;
import com.baomidou.mybatisplus.annotation.IEnum;
/**
 * Operational status of a charging station. Persisted as the constant name
 * string via MyBatis-Plus {@code IEnum}.
 *
 * @author baigod
 */
public enum StationStatusEnum implements IEnum<String> {
    OPERATIONAL, // Operating normally
    PARTIAL_FAILURE, // Some equipment faulted
    FULLY_LOADED, // At full capacity
    MAINTENANCE, // Under maintenance
    CLOSED, // Closed
    WAITING_FOR_OPEN; // Not yet opened
    @Override
    public String getValue() {
        // The stored database value is the constant name itself.
        return name();
    }
}

View File

@@ -0,0 +1,20 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.config.ibatis.enums;
import com.baomidou.mybatisplus.annotation.IEnum;
/**
 * Account status of a platform user. Persisted as the constant name string via
 * MyBatis-Plus {@code IEnum}.
 *
 * @author baigod
 */
public enum UserStatusEnum implements IEnum<String> {
    ENABLE, // Account active
    DISABLE; // Account disabled
    @Override
    public String getValue() {
        // The stored database value is the constant name itself.
        return name();
    }
}

View File

@@ -0,0 +1,40 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.config.ibatis.typehandlers;
import com.baomidou.mybatisplus.extension.handlers.JacksonTypeHandler;
import com.fasterxml.jackson.databind.JsonNode;
import lombok.extern.slf4j.Slf4j;
import org.apache.ibatis.type.JdbcType;
import org.apache.ibatis.type.MappedTypes;
import org.postgresql.util.PGobject;
import sanbing.jcpp.infrastructure.util.jackson.JacksonUtil;
import java.lang.reflect.Field;
import java.sql.PreparedStatement;
import java.sql.SQLException;
/**
 * MyBatis type handler mapping {@code JsonNode} fields to PostgreSQL
 * {@code jsonb} columns. Reading is inherited from {@code JacksonTypeHandler};
 * writing wraps the serialized JSON in a {@code PGobject} of type "jsonb".
 */
@Slf4j
@MappedTypes({JsonNode.class})
public class JsonbTypeHandler extends JacksonTypeHandler {
    public JsonbTypeHandler(Class<?> type) {
        super(type);
    }
    public JsonbTypeHandler(Class<?> type, Field field) {
        super(type, field);
    }
    /**
     * Serializes {@code parameter} with JacksonUtil and binds it as a jsonb
     * {@code PGobject} so the PostgreSQL driver accepts it without an explicit cast.
     */
    @Override
    public void setNonNullParameter(PreparedStatement ps, int i, Object parameter, JdbcType jdbcType) throws SQLException {
        if (ps != null) {
            PGobject jsonObject = new PGobject();
            jsonObject.setType("jsonb");
            jsonObject.setValue(JacksonUtil.toString(parameter));
            ps.setObject(i, jsonObject);
        }
    }
}

View File

@@ -0,0 +1,40 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.config.ibatis.typehandlers;
import org.apache.ibatis.type.BaseTypeHandler;
import org.apache.ibatis.type.JdbcType;
import java.sql.CallableStatement;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.UUID;
/**
 * MyBatis type handler for {@code java.util.UUID} columns. Binds and reads the
 * UUID object directly via {@code setObject}/{@code getObject}, relying on the
 * JDBC driver's native UUID support (works with PostgreSQL's uuid type).
 * NOTE(review): the original comment said "mysql UUID to varchar", but no
 * {@code toString()} conversion is performed here — verify driver behavior if
 * used against MySQL.
 */
public class UUIDTypeHandler extends BaseTypeHandler<UUID> {
    @Override
    public void setNonNullParameter(PreparedStatement ps, int i, UUID parameter, JdbcType jdbcType) throws SQLException {
        ps.setObject(i, parameter);
    }
    @Override
    public UUID getNullableResult(ResultSet rs, String columnName) throws SQLException {
        return rs.getObject(columnName, UUID.class);
    }
    @Override
    public UUID getNullableResult(ResultSet rs, int columnIndex) throws SQLException {
        return rs.getObject(columnIndex, UUID.class);
    }
    @Override
    public UUID getNullableResult(CallableStatement cs, int columnIndex) throws SQLException {
        return cs.getObject(columnIndex, UUID.class);
    }
}

View File

@@ -0,0 +1,61 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.entity;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import com.fasterxml.jackson.databind.JsonNode;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import sanbing.jcpp.app.dal.config.ibatis.enums.GunOptStatusEnum;
import sanbing.jcpp.app.dal.config.ibatis.enums.GunRunStatusEnum;
import sanbing.jcpp.app.dal.config.ibatis.enums.OwnerTypeEnum;
import sanbing.jcpp.infrastructure.cache.HasVersion;
import java.io.Serializable;
import java.time.LocalDateTime;
import java.util.UUID;
/**
 * Charging-gun entity mapped to table {@code jcpp_gun}. Ids are assigned by the
 * application ({@code IdType.INPUT}); {@code version} backs optimistic cache
 * versioning via {@code HasVersion}.
 */
@Data
@TableName("jcpp_gun")
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class Gun implements Serializable, HasVersion {
    @TableId(type = IdType.INPUT)
    private UUID id;
    private LocalDateTime createdTime;
    // Free-form JSON extras (stored as JSON in the DB).
    private JsonNode additionalInfo;
    // Gun number within its pile (e.g. "01").
    private String gunNo;
    private String gunName;
    // Globally unique gun code.
    private String gunCode;
    private UUID stationId;
    private UUID pileId;
    private UUID ownerId;
    private OwnerTypeEnum ownerType;
    // Charge/discharge lifecycle state plus the time it last changed.
    private GunRunStatusEnum runStatus;
    private LocalDateTime runStatusUpdatedTime;
    // Administrative availability state.
    private GunOptStatusEnum optStatus;
    private Integer version;
}

View File

@@ -0,0 +1,69 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.entity;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import com.fasterxml.jackson.databind.JsonNode;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import sanbing.jcpp.app.dal.config.ibatis.enums.OrderStatusEnum;
import sanbing.jcpp.app.dal.config.ibatis.enums.OrderTypeEnum;
import java.io.Serializable;
import java.math.BigDecimal;
import java.time.LocalDateTime;
import java.util.UUID;
/**
 * Charging-order entity mapped to table {@code jcpp_order}. Ids are assigned by
 * the application ({@code IdType.INPUT}).
 */
@Data
@TableName("jcpp_order")
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class Order implements Serializable {
    @TableId(type = IdType.INPUT)
    private UUID id;
    // Platform-internal order number.
    private String internalOrderNo;
    // Order number exposed to / received from external systems.
    private String externalOrderNo;
    // Order number reported by the charging pile itself.
    private String pileOrderNo;
    private LocalDateTime createdTime;
    // Free-form JSON extras.
    private JsonNode additionalInfo;
    private LocalDateTime updatedTime;
    private LocalDateTime cancelledTime;
    private OrderStatusEnum status;
    // Charge vs discharge order.
    private OrderTypeEnum type;
    private UUID creatorId;
    private UUID stationId;
    private UUID pileId;
    private UUID gunId;
    // Vehicle plate number.
    private String plateNo;
    // Settlement amount; tests populate it from Money.getCent(), i.e. cents.
    private Long settlementAmount;
    // Per-tier settlement breakdown as JSON.
    private JsonNode settlementDetails;
    // Energy delivered; unit not established here — presumably kWh, verify.
    private BigDecimal electricityQuantity;
}

View File

@@ -0,0 +1,61 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.entity;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import com.fasterxml.jackson.databind.JsonNode;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import sanbing.jcpp.app.dal.config.ibatis.enums.OwnerTypeEnum;
import sanbing.jcpp.app.dal.config.ibatis.enums.PileStatusEnum;
import sanbing.jcpp.app.dal.config.ibatis.enums.PileTypeEnum;
import sanbing.jcpp.infrastructure.cache.HasVersion;
import java.io.Serializable;
import java.time.LocalDateTime;
import java.util.UUID;
/**
 * Charging-pile entity mapped to table {@code jcpp_pile}
 * ({@code autoResultMap = true} so type handlers apply to result mapping).
 * Ids are assigned by the application; {@code version} backs cache versioning.
 */
@Data
@TableName(value = "jcpp_pile", autoResultMap = true)
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class Pile implements Serializable, HasVersion {
    @TableId(type = IdType.INPUT)
    private UUID id;
    private LocalDateTime createdTime;
    // Free-form JSON extras.
    private JsonNode additionalInfo;
    private String pileName;
    // Unique pile code; used for cache lookups (see PileRepositoryImpl).
    private String pileCode;
    // Protocol identifier, e.g. "yunkuaichongV150" in tests.
    private String protocol;
    private UUID stationId;
    private UUID ownerId;
    private OwnerTypeEnum ownerType;
    private String brand;
    private String model;
    private String manufacturer;
    private PileStatusEnum status;
    // AC or DC pile.
    private PileTypeEnum type;
    private Integer version;
}

View File

@@ -0,0 +1,62 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.entity;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import com.fasterxml.jackson.databind.JsonNode;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import sanbing.jcpp.app.dal.config.ibatis.enums.OwnerTypeEnum;
import sanbing.jcpp.app.dal.config.ibatis.enums.StationStatusEnum;
import sanbing.jcpp.infrastructure.cache.HasVersion;
import java.io.Serializable;
import java.time.LocalDateTime;
import java.util.UUID;
/**
 * Charging-station entity mapped to table {@code jcpp_station}. Ids are
 * assigned by the application; {@code version} backs cache versioning.
 */
@Data
@TableName("jcpp_station")
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class Station implements Serializable, HasVersion {
    @TableId(type = IdType.INPUT)
    private UUID id;
    private LocalDateTime createdTime;
    // Free-form JSON extras.
    private JsonNode additionalInfo;
    private String stationName;
    private String stationCode;
    private UUID ownerId;
    // Geographic coordinates stored as floats.
    private Float longitude;
    private Float latitude;
    private OwnerTypeEnum ownerType;
    // Administrative address components.
    private String province;
    private String city;
    private String county;
    private String address;
    private StationStatusEnum status;
    private Integer version;
}

View File

@@ -0,0 +1,45 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.entity;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import com.fasterxml.jackson.databind.JsonNode;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import sanbing.jcpp.app.dal.config.ibatis.enums.UserStatusEnum;
import sanbing.jcpp.infrastructure.cache.HasVersion;
import java.io.Serializable;
import java.time.LocalDateTime;
import java.util.UUID;
/**
 * Platform-user entity mapped to table {@code jcpp_user}. Ids are assigned by
 * the application; {@code version} backs cache versioning.
 */
@Data
@TableName("jcpp_user")
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class User implements Serializable, HasVersion {
    @TableId(type = IdType.INPUT)
    private UUID id;
    private LocalDateTime createdTime;
    // Free-form JSON extras.
    private JsonNode additionalInfo;
    private UserStatusEnum status;
    private String userName;
    // Credential material as JSON; schema not established here — verify against auth code.
    private JsonNode userCredentials;
    private Integer version;
}

View File

@@ -0,0 +1,14 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.mapper;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import sanbing.jcpp.app.dal.entity.Gun;
/**
 * MyBatis-Plus mapper for {@code Gun}; inherits all CRUD from {@code BaseMapper}.
 *
 * @author baigod
 */
public interface GunMapper extends BaseMapper<Gun> {
}

View File

@@ -0,0 +1,14 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.mapper;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import sanbing.jcpp.app.dal.entity.Order;
/**
 * MyBatis-Plus mapper for {@code Order}; inherits all CRUD from {@code BaseMapper}.
 *
 * @author baigod
 */
public interface OrderMapper extends BaseMapper<Order> {
}

View File

@@ -0,0 +1,23 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.mapper;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import org.apache.ibatis.annotations.Select;
import sanbing.jcpp.app.dal.entity.Pile;
/**
 * MyBatis-Plus mapper for {@code Pile}; adds a lookup by the unique pile code.
 *
 * @author baigod
 */
public interface PileMapper extends BaseMapper<Pile> {
    /**
     * Selects a single pile by its pile_code; returns null when no row matches
     * (standard MyBatis single-result behavior).
     */
    @Select("SELECT " +
            "  * " +
            "FROM " +
            "  jcpp_pile " +
            "WHERE " +
            "  pile_code = #{pileCode}")
    Pile selectByCode(String pileCode);
}

View File

@@ -0,0 +1,14 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.mapper;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import sanbing.jcpp.app.dal.entity.Station;
/**
 * MyBatis-Plus mapper for {@code Station}; inherits all CRUD from {@code BaseMapper}.
 *
 * @author baigod
 */
public interface StationMapper extends BaseMapper<Station> {
}

View File

@@ -0,0 +1,14 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.dal.mapper;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import sanbing.jcpp.app.dal.entity.User;
/**
 * MyBatis-Plus mapper for {@code User}; inherits all CRUD from {@code BaseMapper}.
 *
 * @author baigod
 */
public interface UserMapper extends BaseMapper<User> {
}

View File

@@ -0,0 +1,57 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.data;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;
import java.io.Serializable;
import java.util.UUID;
/**
 * Serializable snapshot of a pile's live protocol session: which pile it is,
 * which protocol it speaks, and where the session is hosted (node id and that
 * node's web-API address). The identity triple is immutable; session-location
 * fields are mutable and updated as the session moves.
 *
 * @author baigod
 */
@Data
public class PileSession implements Serializable {
    // Immutable identity of the session.
    private final UUID pileId;
    private final String pileCode;
    private final String protocolName;
    // Mutable session-location state.
    private UUID protocolSessionId;
    private String remoteAddress;
    private String nodeId;
    private String nodeWebapiIpPort;
    /** Creates a session with identity only; location fields are filled in later. */
    public PileSession(UUID pileId, String pileCode, String protocolName) {
        this.pileId = pileId;
        this.pileCode = pileCode;
        this.protocolName = protocolName;
    }
    /** Full constructor used by Jackson when deserializing a persisted session. */
    @JsonCreator
    public PileSession(
            @JsonProperty("pileId") UUID pileId,
            @JsonProperty("pileCode") String pileCode,
            @JsonProperty("protocolName") String protocolName,
            @JsonProperty("protocolSessionId") UUID protocolSessionId,
            @JsonProperty("remoteAddress") String remoteAddress,
            @JsonProperty("nodeId") String nodeId,
            @JsonProperty("nodeWebapiIpPort") String nodeWebapiIpPort) {
        this.pileId = pileId;
        this.pileCode = pileCode;
        this.protocolName = protocolName;
        this.protocolSessionId = protocolSessionId;
        this.remoteAddress = remoteAddress;
        this.nodeId = nodeId;
        this.nodeWebapiIpPort = nodeWebapiIpPort;
    }
}

View File

@@ -0,0 +1,23 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.repository;
import org.springframework.transaction.support.TransactionSynchronizationManager;
import java.io.Serializable;
/**
 * Base class for repositories backed by a cache. Cache-evict events are routed
 * through the Spring event bus only when a real transaction is active (so the
 * eviction happens on commit); otherwise they are handled immediately.
 *
 * @param <K> cache key type
 * @param <V> cached value type
 * @param <E> evict-event type
 */
public abstract class AbstractCachedEntityRepository<K extends Serializable, V extends Serializable, E> extends AbstractEntityRepository {
    /** Publishes the evict event transactionally, or handles it inline when no transaction is active. */
    protected void publishEvictEvent(E event) {
        if (TransactionSynchronizationManager.isActualTransactionActive()) {
            eventPublisher.publishEvent(event);
        } else {
            handleEvictEvent(event);
        }
    }
    /** Subclasses perform the actual cache eviction for the given event here. */
    public abstract void handleEvictEvent(E event);
}

View File

@@ -0,0 +1,17 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.repository;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.ApplicationEventPublisher;
/**
 * Root base class for entity repositories; provides the Spring application
 * event publisher used to broadcast cache-evict events.
 */
@Slf4j
public abstract class AbstractEntityRepository {
    @Resource
    protected ApplicationEventPublisher eventPublisher;
}

View File

@@ -0,0 +1,19 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.repository;
import jakarta.annotation.Resource;
import sanbing.jcpp.infrastructure.cache.HasVersion;
import sanbing.jcpp.infrastructure.cache.VersionedCache;
import sanbing.jcpp.infrastructure.cache.VersionedCacheKey;
import java.io.Serializable;
/**
 * Cached repository specialized for versioned entities: injects a
 * {@code VersionedCache} keyed by {@code VersionedCacheKey} and holding values
 * that expose a version ({@code HasVersion}) for stale-write protection.
 *
 * @param <K> versioned cache key type
 * @param <V> cached entity type (serializable and versioned)
 * @param <E> evict-event type
 */
public abstract class CachedVersionedEntityRepository<K extends VersionedCacheKey, V extends Serializable & HasVersion, E> extends AbstractCachedEntityRepository<K, V, E> {
    @Resource
    protected VersionedCache<K, V> cache;
}

View File

@@ -0,0 +1,15 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.repository;
import sanbing.jcpp.app.dal.entity.Pile;
/**
 * Read access to charging piles.
 *
 * @author baigod
 */
public interface PileRepository {
    /** Returns the pile with the given unique code, or null when none exists. */
    Pile findPileByCode(String pileCode);
}

View File

@@ -0,0 +1,47 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.repository;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Repository;
import org.springframework.transaction.event.TransactionalEventListener;
import sanbing.jcpp.app.dal.entity.Pile;
import sanbing.jcpp.app.dal.mapper.PileMapper;
import sanbing.jcpp.app.service.cache.pile.PileCacheEvictEvent;
import sanbing.jcpp.app.service.cache.pile.PileCacheKey;
import java.util.ArrayList;
import java.util.List;
import static sanbing.jcpp.infrastructure.util.validation.Validator.validateString;
/**
 * Cache-backed {@link PileRepository}: reads are served from the versioned
 * cache with a database fallback; updates/deletes elsewhere publish
 * {@code PileCacheEvictEvent}s that are consumed here on transaction commit.
 *
 * @author baigod
 */
@Repository
@Slf4j
public class PileRepositoryImpl extends CachedVersionedEntityRepository<PileCacheKey, Pile, PileCacheEvictEvent> implements PileRepository {
    @Resource
    PileMapper pileMapper;
    /**
     * Evicts the pile's cache entries after it is modified or deleted.
     * Exactly two keys are removed — one per lookup dimension (id and code) —
     * so the list is pre-sized to 2 (the original pre-sized it to 3).
     */
    @TransactionalEventListener(classes = PileCacheEvictEvent.class)
    @Override
    public void handleEvictEvent(PileCacheEvictEvent event) {
        List<PileCacheKey> toEvict = new ArrayList<>(2);
        toEvict.add(new PileCacheKey(event.getPileId()));
        toEvict.add(new PileCacheKey(event.getPileCode()));
        cache.evict(toEvict);
    }
    /**
     * Looks up a pile by its code: validates the input, then serves from cache,
     * falling back to {@code pileMapper.selectByCode} on a miss.
     *
     * @param pileCode non-blank pile code
     * @return the pile, or null when no row matches
     */
    @Override
    public Pile findPileByCode(String pileCode) {
        validateString(pileCode, code -> "无效的桩编号" + pileCode);
        return cache.get(new PileCacheKey(pileCode),
                () -> pileMapper.selectByCode(pileCode));
    }
}

View File

@@ -0,0 +1,15 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service;
import sanbing.jcpp.proto.gen.ProtocolProto.DownlinkRestMessage;
/**
 * Sends downlink messages towards a charge pile, routing them to whichever
 * protocol node currently owns the pile's session.
 *
 * @author baigod
 */
public interface DownlinkCallService {

    /**
     * Delivers the given downlink message to the pile identified by {@code pileCode}.
     *
     * @param downlinkMessageBuilder pre-populated message builder (built by the caller's handler)
     * @param pileCode               target pile's unique code, used for session routing
     */
    void sendDownlinkMessage(DownlinkRestMessage.Builder downlinkMessageBuilder, String pileCode);
}

View File

@@ -0,0 +1,66 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service;
import sanbing.jcpp.infrastructure.queue.Callback;
import sanbing.jcpp.proto.gen.ProtocolProto.UplinkQueueMessage;
/**
 * Business-side handlers for uplink protocol messages coming from charge piles.
 * Each handler receives the raw queue message and a {@link Callback} used to
 * acknowledge (or fail) consumption of that message.
 *
 * @author baigod
 */
public interface PileProtocolService {

    /**
     * Pile login.
     */
    void pileLogin(UplinkQueueMessage uplinkQueueMessage, Callback callback);

    /**
     * Pile heartbeat.
     */
    void heartBeat(UplinkQueueMessage uplinkQueueMessage, Callback callback);

    /**
     * Verify a pricing model reported by the pile.
     */
    void verifyPricing(UplinkQueueMessage uplinkQueueMessage, Callback callback);

    /**
     * Query the pricing policy for a pile.
     */
    void queryPricing(UplinkQueueMessage uplinkQueueMessage, Callback callback);

    /**
     * Gun/pile run-status report.
     */
    void postGunRunStatus(UplinkQueueMessage uplinkQueueMessage, Callback callback);

    /**
     * Charging-progress report.
     */
    void postChargingProgress(UplinkQueueMessage uplinkQueueMessage, Callback callback);

    /**
     * Acknowledgement for a pricing push (set-pricing downlink).
     */
    void onSetPricingResponse(UplinkQueueMessage uplinkQueueMessage, Callback callback);

    /**
     * Acknowledgement for a remote-start-charging command.
     *
     * @param uplinkQueueMessage uplink message carrying the pile's response
     * @param callback           consumption callback to acknowledge the message
     */
    void onRemoteStartChargingResponse(UplinkQueueMessage uplinkQueueMessage, Callback callback);

    /**
     * Acknowledgement for a remote-stop-charging command.
     */
    void onRemoteStopChargingResponse(UplinkQueueMessage uplinkQueueMessage, Callback callback);

    /**
     * Transaction (charging session) record upload.
     */
    void onTransactionRecord(UplinkQueueMessage uplinkQueueMessage, Callback callback);
}

View File

@@ -0,0 +1,17 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.cache.pile;
import lombok.Data;
import java.util.UUID;
/**
 * Event published when a pile's cache entries must be evicted (e.g. after the
 * pile is updated or deleted). Consumed by {@code PileRepositoryImpl} after the
 * publishing transaction commits.
 */
@Data
public class PileCacheEvictEvent {

    // Id of the affected pile; used to evict the id-based cache key.
    private UUID pileId;

    // Code of the affected pile; used to evict the code-based cache key.
    private String pileCode;
}

View File

@@ -0,0 +1,47 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.cache.pile;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
import sanbing.jcpp.infrastructure.cache.VersionedCacheKey;
import java.io.Serial;
import java.util.Optional;
import java.util.UUID;
/**
 * Cache key for pile lookups. A key is addressed either by the pile's UUID or by
 * its string code; exactly one of the two fields is expected to be non-null.
 * Only id-based keys participate in versioned caching.
 */
@Getter
@EqualsAndHashCode
@RequiredArgsConstructor
@Builder
public class PileCacheKey implements VersionedCacheKey {

    @Serial
    private static final long serialVersionUID = 6366389552842340207L;

    private final UUID pileId;
    private final String pileCode;

    /** Id-based key. */
    public PileCacheKey(UUID pileId) {
        this(pileId, null);
    }

    /** Code-based key. */
    public PileCacheKey(String pileCode) {
        this(null, pileCode);
    }

    @Override
    public String toString() {
        // Prefer the id form; fall back to the code when only the code is set.
        return pileId != null ? pileId.toString() : pileCode;
    }

    @Override
    public boolean isVersioned() {
        // Versioning is only meaningful for id-addressed entries.
        return pileId != null;
    }
}

View File

@@ -0,0 +1,22 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.cache.pile;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.cache.CacheManager;
import org.springframework.stereotype.Service;
import sanbing.jcpp.app.dal.entity.Pile;
import sanbing.jcpp.infrastructure.cache.CacheConstants;
import sanbing.jcpp.infrastructure.cache.VersionedCaffeineCache;
/**
 * Caffeine-backed pile cache, active when {@code cache.type=caffeine} (the
 * default). Registered under the same bean name as the Redis variant; the two
 * are mutually exclusive via {@code @ConditionalOnProperty}.
 */
@ConditionalOnProperty(prefix = "cache", value = "type", havingValue = "caffeine", matchIfMissing = true)
@Service("PileCache")
public class PileCaffeineCache extends VersionedCaffeineCache<PileCacheKey, Pile> {

    public PileCaffeineCache(CacheManager cacheManager) {
        super(cacheManager, CacheConstants.PILE_CACHE);
    }
}

View File

@@ -0,0 +1,33 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.cache.pile;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;
import org.springframework.data.redis.serializer.SerializationException;
import org.springframework.stereotype.Service;
import sanbing.jcpp.app.dal.entity.Pile;
import sanbing.jcpp.infrastructure.cache.*;
import sanbing.jcpp.infrastructure.util.jackson.JacksonUtil;
/**
 * Redis-backed pile cache, active when {@code cache.type=redis}. Values are
 * (de)serialized as JSON via {@code JacksonUtil}. Mutually exclusive with the
 * Caffeine variant, which shares the {@code "PileCache"} bean name.
 */
@ConditionalOnProperty(prefix = "cache", value = "type", havingValue = "redis")
@Service("PileCache")
public class PileRedisCache extends VersionedRedisCache<PileCacheKey, Pile> {

    public PileRedisCache(JCPPRedisCacheConfiguration configuration, CacheSpecsMap cacheSpecsMap, LettuceConnectionFactory connectionFactory) {
        super(CacheConstants.PILE_CACHE, cacheSpecsMap, connectionFactory, configuration, new JCPPRedisSerializer<>() {
            @Override
            public byte[] serialize(Pile pile) throws SerializationException {
                // JSON-encode the pile for storage in Redis.
                return JacksonUtil.writeValueAsBytes(pile);
            }

            @Override
            public Pile deserialize(PileCacheKey key, byte[] bytes) throws SerializationException {
                // Key is not needed to decode; the payload is self-contained JSON.
                return JacksonUtil.fromBytes(bytes, Pile.class);
            }
        });
    }
}

View File

@@ -0,0 +1,41 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.cache.session;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
import java.io.Serializable;
import java.util.Optional;
import java.util.UUID;
/**
 * Cache key for pile-session lookups, addressed either by pile UUID or by pile
 * code; exactly one field is expected to be non-null.
 *
 * @author baigod
 */
@Getter
@EqualsAndHashCode
@RequiredArgsConstructor
@Builder
public class PileSessionCacheKey implements Serializable {

    private final UUID pileId;
    private final String pileCode;

    /** Id-based key. */
    public PileSessionCacheKey(UUID pileId) {
        this(pileId, null);
    }

    /** Code-based key. */
    public PileSessionCacheKey(String pileCode) {
        this(null, pileCode);
    }

    @Override
    public String toString() {
        // Prefer the id form; fall back to the code when only the code is set.
        return pileId != null ? pileId.toString() : pileCode;
    }
}

View File

@@ -0,0 +1,24 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.cache.session;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.cache.CacheManager;
import org.springframework.stereotype.Service;
import sanbing.jcpp.app.data.PileSession;
import sanbing.jcpp.infrastructure.cache.CacheConstants;
import sanbing.jcpp.infrastructure.cache.CaffeineTransactionalCache;
/**
 * Caffeine-backed pile-session cache, active when {@code cache.type=caffeine}
 * (the default). Mutually exclusive with the Redis variant, which shares the
 * {@code "PileSessionCache"} bean name.
 *
 * @author baigod
 */
@ConditionalOnProperty(prefix = "cache", value = "type", havingValue = "caffeine", matchIfMissing = true)
@Service("PileSessionCache")
public class PileSessionCaffeineCache extends CaffeineTransactionalCache<PileSessionCacheKey, PileSession> {

    public PileSessionCaffeineCache(CacheManager cacheManager) {
        super(cacheManager, CacheConstants.PILE_SESSION_CACHE);
    }
}

View File

@@ -0,0 +1,36 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.cache.session;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;
import org.springframework.data.redis.serializer.SerializationException;
import org.springframework.stereotype.Service;
import sanbing.jcpp.app.data.PileSession;
import sanbing.jcpp.infrastructure.cache.*;
import sanbing.jcpp.infrastructure.util.jackson.JacksonUtil;
/**
 * Redis-backed pile-session cache, active when {@code cache.type=redis}.
 * Sessions are (de)serialized as JSON via {@code JacksonUtil}. Mutually
 * exclusive with the Caffeine variant sharing the same bean name.
 *
 * @author baigod
 */
@ConditionalOnProperty(prefix = "cache", value = "type", havingValue = "redis")
@Service("PileSessionCache")
public class PileSessionRedisCache extends RedisTransactionalCache<PileSessionCacheKey, PileSession> {

    public PileSessionRedisCache(JCPPRedisCacheConfiguration configuration, CacheSpecsMap cacheSpecsMap, LettuceConnectionFactory connectionFactory) {
        super(CacheConstants.PILE_SESSION_CACHE, cacheSpecsMap, connectionFactory, configuration, new JCPPRedisSerializer<>() {
            @Override
            public byte[] serialize(PileSession pileSession) throws SerializationException {
                // JSON-encode the session for storage in Redis.
                return JacksonUtil.writeValueAsBytes(pileSession);
            }

            @Override
            public PileSession deserialize(PileSessionCacheKey key, byte[] bytes) throws SerializationException {
                // Key is not needed to decode; the payload is self-contained JSON.
                return JacksonUtil.fromBytes(bytes, PileSession.class);
            }
        });
    }
}

View File

@@ -0,0 +1,32 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.config;
import org.springframework.boot.web.client.RestTemplateBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.converter.protobuf.ProtobufHttpMessageConverter;
import org.springframework.web.client.RestTemplate;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.Collections;
/**
 * Provides the {@code downlinkRestTemplate} bean used to push downlink messages
 * to protocol nodes over HTTP, speaking protobuf exclusively.
 *
 * @author baigod
 */
@Configuration
public class DownlinkRestTemplateConfiguration {

    // Single timeout applied to both connect and read phases.
    private static final Duration TIMEOUT = Duration.of(3, ChronoUnit.SECONDS);

    @Bean("downlinkRestTemplate")
    public RestTemplate downlinkRestTemplate() {
        RestTemplate template = new RestTemplateBuilder()
                .setConnectTimeout(TIMEOUT)
                .setReadTimeout(TIMEOUT)
                .build();
        // Downlink endpoints exchange protobuf only, so replace the default
        // converter list with the single protobuf converter.
        template.setMessageConverters(Collections.singletonList(new ProtobufHttpMessageConverter()));
        return template;
    }
}

View File

@@ -0,0 +1,93 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.impl;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.stereotype.Service;
import org.springframework.web.client.RestClientException;
import org.springframework.web.client.RestTemplate;
import sanbing.jcpp.app.data.PileSession;
import sanbing.jcpp.app.service.DownlinkCallService;
import sanbing.jcpp.app.service.cache.session.PileSessionCacheKey;
import sanbing.jcpp.infrastructure.cache.CacheValueWrapper;
import sanbing.jcpp.infrastructure.cache.TransactionalCache;
import sanbing.jcpp.infrastructure.queue.discovery.ServiceInfoProvider;
import sanbing.jcpp.infrastructure.util.trace.TracerContextUtil;
import sanbing.jcpp.proto.gen.ProtocolProto.DownlinkRestMessage;
import sanbing.jcpp.protocol.adapter.DownlinkController;
import static sanbing.jcpp.infrastructure.util.trace.TracerContextUtil.*;
/**
 * Default {@link DownlinkCallService}: delivers downlink messages either
 * in-process (monolith deployment) or over HTTP to the protocol node that
 * currently owns the pile's session.
 *
 * @author baigod
 */
@Service
@Slf4j
public class DefaultDownlinkCallService implements DownlinkCallService {

    @Resource
    RestTemplate downlinkRestTemplate;

    @Resource
    ServiceInfoProvider serviceInfoProvider;

    @Resource
    DownlinkController downlinkController;

    @Resource
    TransactionalCache<PileSessionCacheKey, PileSession> pileSessionCache;

    /**
     * Sends a downlink message to the pile identified by {@code pileCode}.
     * <p>
     * Monolith mode: dispatches directly to the local {@link DownlinkController}.
     * Distributed mode: resolves the pile session from the cache to find the
     * owning node's web-API address, then POSTs the protobuf message there.
     * A missing session or a REST failure is logged and swallowed (best-effort
     * delivery; no retry here).
     */
    @Override
    public void sendDownlinkMessage(DownlinkRestMessage.Builder downlinkMessageBuilder, String pileCode) {
        if (serviceInfoProvider.isMonolith()) {
            downlinkController.onDownlink(downlinkMessageBuilder.build())
                    .setResultHandler(result -> log.info("下行消息发送完成"));
        } else {
            try {
                CacheValueWrapper<PileSession> pileSessionCacheValueWrapper = pileSessionCache.get(new PileSessionCacheKey(pileCode));
                if (pileSessionCacheValueWrapper == null) {
                    // No cached session: the pile is offline or unknown, drop the message.
                    log.warn("充电桩会话不存在 {}", pileCode);
                    return;
                }

                PileSession pileSession = pileSessionCacheValueWrapper.get();
                invokeDownlinkRestApi(downlinkMessageBuilder.build(), pileSession.getNodeWebapiIpPort());
            } catch (RestClientException e) {
                // invokeDownlinkRestApi rethrows as RuntimeException, so in practice
                // only failures raised before that wrapping land here.
                log.error("下行消息发送异常", e);
            }
        }
    }

    /**
     * POSTs the downlink protobuf to {@code http://<nodeWebapiIpPort>/api/onDownlink},
     * propagating the current tracer id/origin/timestamp as HTTP headers.
     * REST failures are logged and rethrown wrapped in {@link RuntimeException}.
     */
    private void invokeDownlinkRestApi(DownlinkRestMessage downlinkRestMessage, String nodeWebapiIpPort) {
        HttpHeaders headers = new HttpHeaders();
        headers.add(JCPP_TRACER_ID, TracerContextUtil.getCurrentTracer().getTraceId());
        headers.add(JCPP_TRACER_ORIGIN, TracerContextUtil.getCurrentTracer().getOrigin());
        headers.add(JCPP_TRACER_TS, String.valueOf(TracerContextUtil.getCurrentTracer().getTracerTs()));
        headers.setContentType(MediaType.parseMediaType("application/x-protobuf"));

        HttpEntity<DownlinkRestMessage> entity = new HttpEntity<>(downlinkRestMessage, headers);
        try {
            // NOTE(review): ResponseEntity.class as the response type looks suspicious —
            // responseType normally names the BODY type (e.g. Void.class / String.class);
            // confirm against the receiving controller's reply.
            ResponseEntity<?> response = downlinkRestTemplate.postForEntity("http://" + nodeWebapiIpPort + "/api/onDownlink",
                    entity, ResponseEntity.class);
            log.info("下行消息发送成功 {}", response);
        } catch (RestClientException e) {
            log.error("下行消息发送失败 {}", downlinkRestMessage, e);
            throw new RuntimeException(e);
        }
    }
}

View File

@@ -0,0 +1,264 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.impl;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;
import sanbing.jcpp.app.dal.entity.Pile;
import sanbing.jcpp.app.data.PileSession;
import sanbing.jcpp.app.repository.PileRepository;
import sanbing.jcpp.app.service.DownlinkCallService;
import sanbing.jcpp.app.service.PileProtocolService;
import sanbing.jcpp.app.service.cache.session.PileSessionCacheKey;
import sanbing.jcpp.infrastructure.cache.TransactionalCache;
import sanbing.jcpp.infrastructure.proto.ProtoConverter;
import sanbing.jcpp.infrastructure.proto.model.PricingModel;
import sanbing.jcpp.infrastructure.proto.model.PricingModel.FlagPrice;
import sanbing.jcpp.infrastructure.proto.model.PricingModel.Period;
import sanbing.jcpp.infrastructure.queue.Callback;
import sanbing.jcpp.proto.gen.ProtocolProto.*;
import sanbing.jcpp.protocol.domain.DownlinkCmdEnum;
import java.time.LocalTime;
import java.util.*;
import static sanbing.jcpp.proto.gen.ProtocolProto.PricingModelFlag.*;
import static sanbing.jcpp.proto.gen.ProtocolProto.PricingModelRule.SPLIT_TIME;
import static sanbing.jcpp.proto.gen.ProtocolProto.PricingModelType.CHARGE;
/**
 * Default {@link PileProtocolService}: consumes uplink protocol messages from
 * charge piles, maintains pile sessions, and replies through
 * {@link DownlinkCallService}.
 *
 * @author baigod
 */
@Service
@Slf4j
public class DefaultPileProtocolService implements PileProtocolService {

    @Resource
    PileRepository pileRepository;

    @Resource
    TransactionalCache<PileSessionCacheKey, PileSession> pileSessionCache;

    @Resource
    DownlinkCallService downlinkCallService;

    /**
     * Handles a pile login: resolves the pile by code, caches its session when
     * the pile is known, and always replies with a LOGIN_ACK whose success flag
     * tells the pile whether it was recognized.
     */
    @Override
    public void pileLogin(UplinkQueueMessage uplinkQueueMessage, Callback callback) {
        log.info("接收到桩登录事件 {}", uplinkQueueMessage);

        LoginRequest loginRequest = uplinkQueueMessage.getLoginRequest();
        String pileCode = loginRequest.getPileCode();
        Pile pile = pileRepository.findPileByCode(pileCode);
        log.info("查询到充电桩信息 {}", pile);

        // Build the downlink reply.
        DownlinkRestMessage.Builder downlinkMessageBuilder = createDownlinkMessageBuilder(uplinkQueueMessage, pileCode);
        downlinkMessageBuilder.setDownlinkCmd(DownlinkCmdEnum.LOGIN_ACK.name());

        if (pile != null) {
            // Known pile: remember which node owns the connection so later
            // downlinks can be routed to it.
            cacheSession(uplinkQueueMessage, pile,
                    loginRequest.getRemoteAddress(),
                    loginRequest.getNodeId(),
                    loginRequest.getNodeWebapiIpPort());

            downlinkMessageBuilder.setLoginResponse(LoginResponse.newBuilder()
                    .setSuccess(true)
                    .setPileCode(pileCode)
                    .build());
        } else {
            // Unknown pile: reject the login but still acknowledge the protocol frame.
            downlinkMessageBuilder.setLoginResponse(LoginResponse.newBuilder()
                    .setSuccess(false)
                    .setPileCode(pileCode)
                    .build());
        }

        downlinkCallService.sendDownlinkMessage(downlinkMessageBuilder, pileCode);
        callback.onSuccess();
    }

    /**
     * Handles a pile heartbeat: refreshes the cached session for known piles so
     * routing information stays current.
     */
    @Override
    public void heartBeat(UplinkQueueMessage uplinkQueueMessage, Callback callback) {
        log.info("接收到桩心跳事件 {}", uplinkQueueMessage);

        HeartBeatRequest heartBeatRequest = uplinkQueueMessage.getHeartBeatRequest();
        Pile pile = pileRepository.findPileByCode(heartBeatRequest.getPileCode());
        if (pile != null) {
            // Re-cache the session on every heartbeat.
            cacheSession(uplinkQueueMessage, pile,
                    heartBeatRequest.getRemoteAddress(),
                    heartBeatRequest.getNodeId(),
                    heartBeatRequest.getNodeWebapiIpPort());
        }
        // Fix: acknowledge consumption. Every other handler acks its message;
        // the original left heartbeat messages permanently unacknowledged.
        callback.onSuccess();
    }

    /**
     * Stores (or refreshes) the pile session in the transactional cache under
     * both the id-based and the code-based keys.
     */
    private void cacheSession(UplinkQueueMessage uplinkQueueMessage, Pile pile, String remoteAddress, String nodeId, String nodeWebapiIpPort) {
        PileSession pileSession = new PileSession(pile.getId(), pile.getPileCode(), uplinkQueueMessage.getProtocolName());
        // Reassemble the 128-bit protocol session id from its two halves.
        pileSession.setProtocolSessionId(new UUID(uplinkQueueMessage.getSessionIdMSB(), uplinkQueueMessage.getSessionIdLSB()));
        pileSession.setRemoteAddress(remoteAddress);
        pileSession.setNodeId(nodeId);
        pileSession.setNodeWebapiIpPort(nodeWebapiIpPort);

        pileSessionCache.put(new PileSessionCacheKey(pile.getId()), pileSession);
        pileSessionCache.put(new PileSessionCacheKey(pile.getPileCode()), pileSession);
    }

    /**
     * Verifies a pricing model reported by the pile and replies with
     * VERIFY_PRICING_ACK.
     */
    @Override
    public void verifyPricing(UplinkQueueMessage uplinkQueueMessage, Callback callback) {
        log.info("接收到计费模型验证请求 {}", uplinkQueueMessage);

        VerifyPricingRequest verifyPricingRequest = uplinkQueueMessage.getVerifyPricingRequest();
        String pileCode = verifyPricingRequest.getPileCode();
        long pricingId = verifyPricingRequest.getPricingId();

        // TODO: currently always reports success; validate against the database later.
        assert pricingId > 0;

        DownlinkRestMessage.Builder downlinkMessageBuilder = createDownlinkMessageBuilder(uplinkQueueMessage, pileCode);
        downlinkMessageBuilder.setDownlinkCmd(DownlinkCmdEnum.VERIFY_PRICING_ACK.name());
        downlinkMessageBuilder.setVerifyPricingResponse(VerifyPricingResponse.newBuilder()
                .setSuccess(true)
                .setPricingId(pricingId)
                .build());

        downlinkCallService.sendDownlinkMessage(downlinkMessageBuilder, pileCode);
        callback.onSuccess();
    }

    /**
     * Answers a pricing query with a hard-coded split-time charging model
     * (four six-hour periods, flat electricity/service prices) and replies with
     * QUERY_PRICING_ACK.
     */
    @Override
    public void queryPricing(UplinkQueueMessage uplinkQueueMessage, Callback callback) {
        log.info("接收到充电桩计费模型请求 {}", uplinkQueueMessage);

        QueryPricingRequest queryPricingRequest = uplinkQueueMessage.getQueryPricingRequest();
        String pileCode = queryPricingRequest.getPileCode();

        // TODO: build a generic pricing model for now; query the database per business rules later.
        List<Period> periods = new ArrayList<>();
        periods.add(createPeriod(1, LocalTime.parse("00:00"), LocalTime.parse("06:00"), TOP));
        periods.add(createPeriod(2, LocalTime.parse("06:00"), LocalTime.parse("12:00"), PEAK));
        periods.add(createPeriod(3, LocalTime.parse("12:00"), LocalTime.parse("18:00"), FLAT));
        periods.add(createPeriod(4, LocalTime.parse("18:00"), LocalTime.parse("00:00"), VALLEY));

        // Same price for every flag in this placeholder model.
        Map<PricingModelFlag, FlagPrice> flagPriceMap = new HashMap<>();
        flagPriceMap.put(TOP, new FlagPrice(75, 45));
        flagPriceMap.put(PEAK, new FlagPrice(75, 45));
        flagPriceMap.put(FLAT, new FlagPrice(75, 45));
        flagPriceMap.put(VALLEY, new FlagPrice(75, 45));

        PricingModel model = new PricingModel();
        model.setId(UUID.randomUUID());
        model.setSequenceNumber(1);
        model.setPileCode(pileCode);
        model.setType(CHARGE);
        model.setRule(SPLIT_TIME);
        model.setStandardElec(75);
        model.setStandardServ(45);
        model.setFlagPriceList(flagPriceMap);
        model.setPeriodsList(periods);

        // Build the downlink pricing reply.
        DownlinkRestMessage.Builder downlinkMessageBuilder = createDownlinkMessageBuilder(uplinkQueueMessage, pileCode);
        downlinkMessageBuilder.setDownlinkCmd(DownlinkCmdEnum.QUERY_PRICING_ACK.name());
        downlinkMessageBuilder.setQueryPricingResponse(QueryPricingResponse.newBuilder()
                .setPileCode(pileCode)
                .setPricingId(model.getSequenceNumber())
                .setPricingModel(ProtoConverter.toPricingModel(model))
                .build());

        downlinkCallService.sendDownlinkMessage(downlinkMessageBuilder, pileCode);
        callback.onSuccess();
    }

    /** Run-status report: currently log-and-ack only. */
    @Override
    public void postGunRunStatus(UplinkQueueMessage uplinkQueueMessage, Callback callback) {
        log.info("接收到充电桩上报的电桩状态 {}", uplinkQueueMessage);
        callback.onSuccess();
    }

    /** Charging-progress report: currently log-and-ack only. */
    @Override
    public void postChargingProgress(UplinkQueueMessage uplinkQueueMessage, Callback callback) {
        log.info("接收到充电桩上报的充电进度 {}", uplinkQueueMessage);
        callback.onSuccess();
    }

    /** Pricing-push acknowledgement: currently log-and-ack only. */
    @Override
    public void onSetPricingResponse(UplinkQueueMessage uplinkQueueMessage, Callback callback) {
        log.info("接收到充电桩上费率下发反馈 {}", uplinkQueueMessage);
        callback.onSuccess();
    }

    /** Remote-start acknowledgement: currently log-and-ack only. */
    @Override
    public void onRemoteStartChargingResponse(UplinkQueueMessage uplinkQueueMessage, Callback callback) {
        log.info("接收到充电桩启动结果反馈 {}", uplinkQueueMessage);
        callback.onSuccess();
    }

    /** Remote-stop acknowledgement: currently log-and-ack only. */
    @Override
    public void onRemoteStopChargingResponse(UplinkQueueMessage uplinkQueueMessage, Callback callback) {
        log.info("接收到充电桩停止结果反馈 {}", uplinkQueueMessage);
        callback.onSuccess();
    }

    /**
     * Handles a transaction-record upload: replies with a positive
     * TRANSACTION_RECORD ack. Persistence of the record is not implemented yet.
     */
    @Override
    public void onTransactionRecord(UplinkQueueMessage uplinkQueueMessage, Callback callback) {
        log.info("接收到充电桩交易记录上报 {}", uplinkQueueMessage);

        // TODO: nothing is persisted yet; just acknowledge the record for now.
        TransactionRecord transactionRecord = uplinkQueueMessage.getTransactionRecord();
        String tradeNo = transactionRecord.getTradeNo();
        String pileCode = transactionRecord.getPileCode();

        // Build the downlink ack.
        DownlinkRestMessage.Builder downlinkMessageBuilder = createDownlinkMessageBuilder(uplinkQueueMessage, pileCode);
        downlinkMessageBuilder.setDownlinkCmd(DownlinkCmdEnum.TRANSACTION_RECORD.name());
        downlinkMessageBuilder.setTransactionRecordAck(TransactionRecordAck.newBuilder()
                .setTradeNo(tradeNo)
                .setSuccess(true)
                .build());

        downlinkCallService.sendDownlinkMessage(downlinkMessageBuilder, pileCode);
        callback.onSuccess();
    }

    /** Builds one time-of-use period of the placeholder pricing model. */
    private static Period createPeriod(int sn, LocalTime beginTime, LocalTime endTime, PricingModelFlag flag) {
        Period period = new Period();
        period.setSn(sn);
        period.setBegin(beginTime);
        period.setEnd(endTime);
        period.setFlag(flag);
        return period;
    }

    /**
     * Creates a downlink message builder pre-populated with a fresh message id
     * and the routing/tracing fields copied from the uplink message.
     */
    private DownlinkRestMessage.Builder createDownlinkMessageBuilder(UplinkQueueMessage uplinkQueueMessage, String pileCode) {
        UUID messageId = UUID.randomUUID();

        DownlinkRestMessage.Builder builder = DownlinkRestMessage.newBuilder();
        // Fix: MSB must come from the most-significant bits. The original set both
        // fields from getLeastSignificantBits(), collapsing the 128-bit message id
        // into 64 bits and making id collisions far more likely.
        builder.setMessageIdMSB(messageId.getMostSignificantBits());
        builder.setMessageIdLSB(messageId.getLeastSignificantBits());
        builder.setPileCode(pileCode);
        builder.setSessionIdMSB(uplinkQueueMessage.getSessionIdMSB());
        builder.setSessionIdLSB(uplinkQueueMessage.getSessionIdLSB());
        builder.setProtocolName(uplinkQueueMessage.getProtocolName());
        builder.setRequestIdMSB(uplinkQueueMessage.getMessageIdMSB());
        builder.setRequestIdLSB(uplinkQueueMessage.getMessageIdLSB());
        builder.setRequestData(uplinkQueueMessage.getRequestData());
        return builder;
    }
}

View File

@@ -0,0 +1,66 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.queue;
import jakarta.annotation.PreDestroy;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.ApplicationEventPublisher;
import sanbing.jcpp.infrastructure.queue.discovery.PartitionProvider;
import sanbing.jcpp.infrastructure.queue.discovery.event.JCPPApplicationEventListener;
import sanbing.jcpp.infrastructure.queue.discovery.event.PartitionChangeEvent;
import sanbing.jcpp.infrastructure.util.annotation.AfterStartUp;
import sanbing.jcpp.infrastructure.util.async.JCPPExecutors;
import sanbing.jcpp.infrastructure.util.async.JCPPThreadFactory;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
/**
 * Base class for queue-consumer services: owns the consumer/management/scheduler
 * thread pools and tears them down on bean destruction. Subclasses react to
 * partition changes via {@code JCPPApplicationEventListener} and override the
 * start/stop hooks.
 */
@Slf4j
@RequiredArgsConstructor
public abstract class AbstractConsumerService extends JCPPApplicationEventListener<PartitionChangeEvent> {

    protected final PartitionProvider partitionProvider;
    protected final ApplicationEventPublisher eventPublisher;

    protected ExecutorService consumersExecutor;
    protected ExecutorService mgmtExecutor;
    protected ScheduledExecutorService scheduler;

    /**
     * Creates the three pools, naming their threads with the given prefix:
     * an unbounded cached pool for consumers, a work-stealing pool for
     * management tasks, and a single-threaded scheduler.
     */
    public void init(String prefix) {
        consumersExecutor = Executors.newCachedThreadPool(JCPPThreadFactory.forName(prefix + "-consumer"));
        mgmtExecutor = JCPPExecutors.newWorkStealingPool(getMgmtThreadPoolSize(), prefix + "-mgmt");
        scheduler = Executors.newSingleThreadScheduledExecutor(JCPPThreadFactory.forName(prefix + "-consumer-scheduler"));
    }

    /** Starts consumers once the application has fully started. */
    @AfterStartUp(order = AfterStartUp.REGULAR_SERVICE)
    public void afterStartUp() {
        startConsumers();
    }

    /** Hook for subclasses; default is a no-op. */
    protected void startConsumers() {
    }

    /** Hook for subclasses; default is a no-op. */
    protected void stopConsumers() {
    }

    /** Number of threads for the management pool, chosen by the subclass. */
    protected abstract int getMgmtThreadPoolSize();

    /** Stops consumers, then force-shuts every pool that was created. */
    @PreDestroy
    public void destroy() {
        stopConsumers();
        shutdownNow(consumersExecutor);
        shutdownNow(mgmtExecutor);
        shutdownNow(scheduler);
    }

    /** Null-safe {@code shutdownNow} (pools are null if init was never called). */
    private static void shutdownNow(ExecutorService executor) {
        if (executor != null) {
            executor.shutdownNow();
        }
    }
}

View File

@@ -0,0 +1,85 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.queue;
import io.micrometer.core.instrument.Timer;
import lombok.extern.slf4j.Slf4j;
import sanbing.jcpp.infrastructure.stats.StatsCounter;
import sanbing.jcpp.infrastructure.stats.StatsFactory;
import sanbing.jcpp.infrastructure.util.trace.TracerContextUtil;
import sanbing.jcpp.proto.gen.ProtocolProto.UplinkQueueMessage;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
/**
 * Per-message-type counters for the application queue consumer, plus a timer
 * recording end-to-end latency from the tracer timestamp to consumption.
 */
@Slf4j
public class AppConsumerStats {

    public static final String TOTAL_MSGS = "totalMsgs";
    public static final String LOGIN_EVENTS = "loginEvents";
    public static final String HEARTBEAT_EVENTS = "heartBeatEvents";
    public static final String GUN_RUN_STATUS_EVENTS = "gunRunStatusEvents";
    public static final String CHARGING_PROGRESS_EVENTS = "chargingProgressEvents";
    public static final String TRANSACTION_RECORD_EVENTS = "transactionRecordEvents";

    private final StatsCounter totalCounter;
    private final StatsCounter loginCounter;
    private final StatsCounter heartBeatCounter;
    private final StatsCounter gunRunStatusCounter;
    private final StatsCounter chargingProgressCounter;
    private final StatsCounter transactionRecordCounter;

    private final Timer appConsumerTimer;

    // All counters, in registration order, for printing and resetting.
    private final List<StatsCounter> counters = new ArrayList<>();

    public AppConsumerStats(StatsFactory statsFactory) {
        final String statsKey = "appConsumer";
        totalCounter = register(statsFactory.createStatsCounter(statsKey, TOTAL_MSGS));
        loginCounter = register(statsFactory.createStatsCounter(statsKey, LOGIN_EVENTS));
        heartBeatCounter = register(statsFactory.createStatsCounter(statsKey, HEARTBEAT_EVENTS));
        gunRunStatusCounter = register(statsFactory.createStatsCounter(statsKey, GUN_RUN_STATUS_EVENTS));
        chargingProgressCounter = register(statsFactory.createStatsCounter(statsKey, CHARGING_PROGRESS_EVENTS));
        transactionRecordCounter = register(statsFactory.createStatsCounter(statsKey, TRANSACTION_RECORD_EVENTS));
        appConsumerTimer = statsFactory.createTimer(statsKey);
    }

    /** Tracks the counter for bulk print/reset and returns it for field assignment. */
    private StatsCounter register(StatsCounter counter) {
        counters.add(counter);
        return counter;
    }

    /**
     * Records one consumed message: bumps the total, bumps the first matching
     * per-type counter, and records latency relative to the tracer timestamp.
     */
    public void log(UplinkQueueMessage msg) {
        totalCounter.increment();
        if (msg.hasLoginRequest()) {
            loginCounter.increment();
        } else if (msg.hasHeartBeatRequest()) {
            heartBeatCounter.increment();
        } else if (msg.hasGunRunStatusProto()) {
            gunRunStatusCounter.increment();
        } else if (msg.hasChargingProgressProto()) {
            chargingProgressCounter.increment();
        } else if (msg.hasTransactionRecord()) {
            transactionRecordCounter.increment();
        }
        appConsumerTimer.record(Duration.ofMillis(System.currentTimeMillis() - TracerContextUtil.getCurrentTracer().getTracerTs()));
    }

    /** Logs all counter values, but only when at least one message was seen. */
    public void printStats() {
        if (totalCounter.get() > 0) {
            StringBuilder report = new StringBuilder();
            for (StatsCounter counter : counters) {
                report.append(counter.getName()).append(" = [").append(counter.get()).append("] ");
            }
            log.info("App Queue Consumer Stats: {}", report);
        }
    }

    /** Zeroes every registered counter. */
    public void reset() {
        counters.forEach(StatsCounter::clear);
    }
}

View File

@@ -0,0 +1,296 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.queue;
import lombok.Builder;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import sanbing.jcpp.infrastructure.queue.QueueConsumer;
import sanbing.jcpp.infrastructure.queue.QueueMsg;
import sanbing.jcpp.infrastructure.queue.common.QueueConfig;
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
import sanbing.jcpp.infrastructure.util.async.JCPPThreadFactory;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BiFunction;
import java.util.stream.Collectors;
@Slf4j
public class AppQueueConsumerManager<M extends QueueMsg, C extends QueueConfig> {
protected final String queueName;
@Getter
protected C config;
protected final MsgPackProcessor<M, C> msgPackProcessor;
protected final BiFunction<C, Integer, QueueConsumer<M>> consumerCreator;
protected final ExecutorService consumerExecutor;
protected final ScheduledExecutorService scheduler;
protected final ExecutorService taskExecutor;
private final Queue<QueueConsumerManagerTask> tasks = new ConcurrentLinkedQueue<>();
private final ReentrantLock lock = new ReentrantLock();
@Getter
private volatile Set<TopicPartitionInfo> partitions;
protected volatile ConsumerWrapper<M> consumerWrapper;
protected volatile boolean stopped;
@Builder
public AppQueueConsumerManager(String queueName, C config,
MsgPackProcessor<M, C> msgPackProcessor,
BiFunction<C, Integer, QueueConsumer<M>> consumerCreator,
ExecutorService consumerExecutor,
ScheduledExecutorService scheduler,
ExecutorService taskExecutor) {
this.queueName = queueName;
this.config = config;
this.msgPackProcessor = msgPackProcessor;
this.consumerCreator = consumerCreator;
this.consumerExecutor = consumerExecutor;
this.scheduler = scheduler;
this.taskExecutor = taskExecutor;
if (config != null) {
init(config);
}
}
public void init(C config) {
this.config = config;
if (config.isConsumerPerPartition()) {
this.consumerWrapper = new ConsumerPerPartitionWrapper();
} else {
this.consumerWrapper = new SingleConsumerWrapper();
}
log.debug("[{}] Initialized consumer for queue: {}", queueName, config);
}
public void update(C config) {
addTask(QueueConsumerManagerTask.configUpdate(config));
}
public void update(Set<TopicPartitionInfo> partitions) {
addTask(QueueConsumerManagerTask.partitionChange(partitions));
}
protected void addTask(QueueConsumerManagerTask todo) {
if (stopped) {
return;
}
tasks.add(todo);
log.info("[{}] Added task: {}", queueName, todo);
tryProcessTasks();
}
@SuppressWarnings("unchecked")
private void tryProcessTasks() {
taskExecutor.submit(() -> {
if (lock.tryLock()) {
try {
C newConfig = null;
Set<TopicPartitionInfo> newPartitions = null;
while (!stopped) {
QueueConsumerManagerTask task = tasks.poll();
if (task == null) {
break;
}
log.info("[{}] Processing task: {}", queueName, task);
if (task.getEvent() == QueueEvent.PARTITION_CHANGE) {
newPartitions = task.getPartitions();
} else if (task.getEvent() == QueueEvent.CONFIG_UPDATE) {
newConfig = (C) task.getConfig();
} else {
processTask(task);
}
}
if (stopped) {
return;
}
if (newConfig != null) {
doUpdate(newConfig);
}
if (newPartitions != null) {
doUpdate(newPartitions);
}
} catch (Exception e) {
log.error("[{}] Failed to process tasks", queueName, e);
} finally {
lock.unlock();
}
} else {
log.trace("[{}] Failed to acquire lock", queueName);
scheduler.schedule(this::tryProcessTasks, 1, TimeUnit.SECONDS);
}
});
}
protected void processTask(QueueConsumerManagerTask task) {
}
private void doUpdate(C newConfig) {
log.info("[{}] Processing queue update: {}", queueName, newConfig);
var oldConfig = this.config;
this.config = newConfig;
if (log.isTraceEnabled()) {
log.trace("[{}] Old queue configuration: {}", queueName, oldConfig);
log.trace("[{}] New queue configuration: {}", queueName, newConfig);
}
if (oldConfig == null) {
init(config);
} else if (newConfig.isConsumerPerPartition() != oldConfig.isConsumerPerPartition()) {
consumerWrapper.getConsumers().forEach(QueueConsumerTask::initiateStop);
consumerWrapper.getConsumers().forEach(QueueConsumerTask::awaitCompletion);
init(config);
if (partitions != null) {
doUpdate(partitions);
}
} else {
log.trace("[{}] Silently applied new config, because consumer-per-partition not changed", queueName);
}
}
private void doUpdate(Set<TopicPartitionInfo> partitions) {
this.partitions = partitions;
consumerWrapper.updatePartitions(partitions);
}
private void launchConsumer(QueueConsumerTask<M> consumerTask) {
log.info("[{}] Launching consumer", consumerTask.getKey());
Future<?> consumerLoop = consumerExecutor.submit(() -> {
JCPPThreadFactory.updateCurrentThreadName(consumerTask.getKey().toString());
try {
consumerLoop(consumerTask.getConsumer());
} catch (Throwable e) {
log.error("Failure in consumer loop", e);
}
log.info("[{}] Consumer stopped", consumerTask.getKey());
});
consumerTask.setTask(consumerLoop);
}
/**
 * Poll-process loop run on a consumer executor thread.
 * <p>
 * Runs until either the whole manager is stopped ({@code stopped}) or this
 * particular consumer is stopped. On a processing error the loop backs off for
 * one poll interval before retrying; errors raised while the consumer is
 * already stopping are ignored. Only a consumer that was explicitly stopped is
 * unsubscribed at the end — a manager-level stop leaves the subscription as-is.
 */
private void consumerLoop(QueueConsumer<M> consumer) {
    while (!stopped && !consumer.isStopped()) {
        try {
            List<M> msgs = consumer.poll(config.getPollInterval());
            if (msgs.isEmpty()) {
                continue;
            }
            processMsgs(msgs, consumer, config);
        } catch (Exception e) {
            if (!consumer.isStopped()) {
                log.warn("Failed to process messages from queue", e);
                try {
                    // Back off before the next poll attempt to avoid a hot error loop.
                    Thread.sleep(config.getPollInterval());
                } catch (InterruptedException e2) {
                    log.trace("Failed to wait until the server has capacity to handle new requests", e2);
                }
            }
        }
    }
    if (consumer.isStopped()) {
        consumer.unsubscribe();
    }
}
/**
 * Processes one polled batch by delegating to the configured
 * {@link MsgPackProcessor}. Subclasses may override to customize batch handling.
 *
 * @throws Exception propagated to {@code consumerLoop}, which logs and backs off
 */
protected void processMsgs(List<M> msgs, QueueConsumer<M> consumer, C config) throws Exception {
    msgPackProcessor.process(msgs, consumer, config);
}
/**
 * Initiates shutdown: asks every consumer to stop, then raises the global
 * {@code stopped} flag so poll loops exit. Stopping the consumers first lets
 * each loop observe {@code consumer.isStopped()} and unsubscribe on exit
 * (see consumerLoop). Does not wait — pair with {@link #awaitStop()}.
 */
public void stop() {
    log.debug("[{}] Stopping consumers", queueName);
    consumerWrapper.getConsumers().forEach(QueueConsumerTask::initiateStop);
    stopped = true;
}
/**
 * Blocks until all consumer loops launched by this manager have finished.
 * Intended to be called after {@link #stop()}.
 */
public void awaitStop() {
    log.debug("[{}] Waiting for consumers to stop", queueName);
    consumerWrapper.getConsumers().forEach(QueueConsumerTask::awaitCompletion);
    log.debug("[{}] Unsubscribed and stopped consumers", queueName);
}
/**
 * Renders a partition collection as "[fullTopicA, fullTopicB, ...]" for logging,
 * using each partition's full topic name.
 */
private static String partitionsToString(Collection<TopicPartitionInfo> partitions) {
    java.util.StringJoiner joiner = new java.util.StringJoiner(", ", "[", "]");
    for (TopicPartitionInfo partition : partitions) {
        joiner.add(partition.getFullTopicName());
    }
    return joiner.toString();
}
/**
 * Callback that processes one polled batch of messages.
 * Implementations are responsible for committing via the given consumer when done.
 */
public interface MsgPackProcessor<M extends QueueMsg, C extends QueueConfig> {
    void process(List<M> msgs, QueueConsumer<M> consumer, C config) throws Exception;
}
/**
 * Strategy for mapping a partition assignment onto consumer tasks —
 * either one consumer per partition or a single consumer for all partitions.
 */
public interface ConsumerWrapper<M extends QueueMsg> {
    // Reacts to a new partition assignment, creating/stopping consumers as needed.
    void updatePartitions(Set<TopicPartitionInfo> partitions);
    // Currently managed consumer tasks (possibly empty).
    Collection<QueueConsumerTask<M>> getConsumers();
}
/**
 * One consumer task per assigned partition. On each update, consumers for removed
 * partitions are stopped and awaited, and a new consumer is created, subscribed to
 * exactly its partition, and launched for each added partition.
 * NOTE(review): the backing HashMap is not thread-safe — assumes all calls are
 * serialized by the manager's lock; confirm against the task-processing loop.
 */
class ConsumerPerPartitionWrapper implements ConsumerWrapper<M> {
    // Live consumer task per partition.
    private final Map<TopicPartitionInfo, QueueConsumerTask<M>> consumers = new HashMap<>();

    @Override
    public void updatePartitions(Set<TopicPartitionInfo> partitions) {
        // Diff the new assignment against the current one.
        Set<TopicPartitionInfo> addedPartitions = new HashSet<>(partitions);
        addedPartitions.removeAll(consumers.keySet());
        Set<TopicPartitionInfo> removedPartitions = new HashSet<>(consumers.keySet());
        removedPartitions.removeAll(partitions);
        log.info("[{}] Added partitions: {}, removed partitions: {}", queueName, partitionsToString(addedPartitions), partitionsToString(removedPartitions));
        // Request stop on all removed consumers first, then await each while removing it.
        removedPartitions.forEach((tpi) -> consumers.get(tpi).initiateStop());
        removedPartitions.forEach((tpi) -> consumers.remove(tpi).awaitCompletion());
        addedPartitions.forEach((tpi) -> {
            // -1 marks "partition unknown" in the consumer key.
            Integer partitionId = tpi.getPartition().orElse(-1);
            String key = queueName + "-" + partitionId;
            QueueConsumerTask<M> consumer = new QueueConsumerTask<>(key, () -> consumerCreator.apply(config, partitionId));
            consumers.put(tpi, consumer);
            consumer.subscribe(Set.of(tpi));
            launchConsumer(consumer);
        });
    }

    @Override
    public Collection<QueueConsumerTask<M>> getConsumers() {
        return consumers.values();
    }
}
/**
 * A single consumer task subscribed to the whole partition set. An empty
 * assignment stops and discards the consumer; a non-empty one (re)subscribes
 * the existing consumer or lazily creates and launches a new one.
 */
class SingleConsumerWrapper implements ConsumerWrapper<M> {
    // The one consumer; null when no partitions are assigned.
    private QueueConsumerTask<M> consumer;

    @Override
    public void updatePartitions(Set<TopicPartitionInfo> partitions) {
        log.info("[{}] New partitions: {}", queueName, partitionsToString(partitions));
        if (partitions.isEmpty()) {
            // Nothing assigned: stop the running consumer (if any) and drop it.
            if (consumer != null && consumer.isRunning()) {
                consumer.initiateStop();
                consumer.awaitCompletion();
            }
            consumer = null;
            return;
        }
        if (consumer == null) {
            consumer = new QueueConsumerTask<>(queueName, () -> consumerCreator.apply(config, null)); // no partitionId passed
        }
        // Re-subscribing a live consumer updates its assignment without restarting it.
        consumer.subscribe(partitions);
        if (!consumer.isRunning()) {
            launchConsumer(consumer);
        }
    }

    @Override
    public Collection<QueueConsumerTask<M>> getConsumers() {
        if (consumer == null) {
            return Collections.emptyList();
        }
        return List.of(consumer);
    }
}
}

View File

@@ -0,0 +1,37 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.queue;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.ToString;
import sanbing.jcpp.infrastructure.queue.common.QueueConfig;
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
import java.util.Set;
/**
 * Immutable-ish event posted to the queue-consumer manager's task queue.
 * Exactly one payload field is meaningful per event type: {@code config} for
 * CONFIG_UPDATE, {@code partitions} for PARTITION_CHANGE, {@code drainQueue}
 * for DELETE. Use the static factories instead of the all-args constructor.
 */
@Getter
@ToString
@AllArgsConstructor
public class QueueConsumerManagerTask {

    // Discriminator for which payload field below is populated.
    private final QueueEvent event;
    // Payload for CONFIG_UPDATE events; null otherwise.
    private QueueConfig config;
    // Payload for PARTITION_CHANGE events; null otherwise.
    private Set<TopicPartitionInfo> partitions;
    // Payload for DELETE events; false otherwise.
    private boolean drainQueue;

    public static QueueConsumerManagerTask delete(boolean drainQueue) {
        return new QueueConsumerManagerTask(QueueEvent.DELETE, null, null, drainQueue);
    }

    public static QueueConsumerManagerTask configUpdate(QueueConfig config) {
        return new QueueConsumerManagerTask(QueueEvent.CONFIG_UPDATE, config, null, false);
    }

    public static QueueConsumerManagerTask partitionChange(Set<TopicPartitionInfo> partitions) {
        return new QueueConsumerManagerTask(QueueEvent.PARTITION_CHANGE, null, partitions, false);
    }
}

View File

@@ -0,0 +1,78 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.queue;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import sanbing.jcpp.infrastructure.queue.QueueConsumer;
import sanbing.jcpp.infrastructure.queue.QueueMsg;
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
/**
 * Pairs a lazily-created {@link QueueConsumer} with the {@link Future} of the
 * thread running its poll loop, so the manager can subscribe, stop and await it
 * under one handle.
 */
@Slf4j
public class QueueConsumerTask<M extends QueueMsg> {

    /** Identifier used in log messages (queue name, optionally with partition id). */
    @Getter
    private final Object key;

    // Lazily created consumer; volatile for the double-checked locking in getConsumer().
    private volatile QueueConsumer<M> consumer;
    // Factory for the consumer; nulled out after first use so it can be collected.
    private volatile Supplier<QueueConsumer<M>> consumerSupplier;

    // Future of the poll loop; non-null iff the loop was launched and not yet awaited.
    @Setter
    private Future<?> task;

    public QueueConsumerTask(Object key, Supplier<QueueConsumer<M>> consumerSupplier) {
        this.key = key;
        this.consumer = null;
        this.consumerSupplier = consumerSupplier;
    }

    /**
     * Returns the consumer, creating it on first access via double-checked locking.
     *
     * @throws NullPointerException if the supplier is absent or produces null
     */
    public QueueConsumer<M> getConsumer() {
        if (consumer == null) {
            synchronized (this) {
                if (consumer == null) {
                    Objects.requireNonNull(consumerSupplier, "consumerSupplier for key [" + key + "] is null");
                    consumer = consumerSupplier.get();
                    Objects.requireNonNull(consumer, "consumer for key [" + key + "] is null");
                    consumerSupplier = null;
                }
            }
        }
        return consumer;
    }

    public void subscribe(Set<TopicPartitionInfo> partitions) {
        log.info("[{}] Subscribing to partitions: {}", key, partitions);
        getConsumer().subscribe(partitions);
    }

    /**
     * Requests the consumer to stop. Fix: operate on the already-created instance
     * only — the previous getConsumer().stop() would lazily instantiate a brand-new
     * consumer during shutdown just to stop it. If no consumer was ever created,
     * there is nothing to stop.
     */
    public void initiateStop() {
        log.debug("[{}] Initiating stop", key);
        QueueConsumer<M> current = this.consumer;
        if (current != null) {
            current.stop();
        }
    }

    /**
     * Waits up to 30 seconds for the poll loop to finish, then clears the task
     * handle so {@link #isRunning()} turns false. Timeout/failure is logged only.
     */
    public void awaitCompletion() {
        log.trace("[{}] Awaiting finish", key);
        if (isRunning()) {
            try {
                task.get(30, TimeUnit.SECONDS);
                log.trace("[{}] Awaited finish", key);
            } catch (Exception e) {
                log.warn("[{}] Failed to await for consumer to stop", key, e);
            }
            task = null;
        }
    }

    /** True while a launched poll loop has not yet been awaited. */
    public boolean isRunning() {
        return task != null;
    }
}

View File

@@ -0,0 +1,13 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.queue;
import java.io.Serializable;
/**
 * Event kinds carried by {@link QueueConsumerManagerTask}:
 * partition reassignment, queue configuration change, or queue deletion.
 */
public enum QueueEvent implements Serializable {
    PARTITION_CHANGE, CONFIG_UPDATE, DELETE
}

View File

@@ -0,0 +1,236 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.app.service.queue.consumer;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import lombok.Data;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.ApplicationListener;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import sanbing.jcpp.app.service.PileProtocolService;
import sanbing.jcpp.app.service.queue.AbstractConsumerService;
import sanbing.jcpp.app.service.queue.AppConsumerStats;
import sanbing.jcpp.app.service.queue.AppQueueConsumerManager;
import sanbing.jcpp.infrastructure.queue.*;
import sanbing.jcpp.infrastructure.queue.common.QueueConfig;
import sanbing.jcpp.infrastructure.queue.common.TopicPartitionInfo;
import sanbing.jcpp.infrastructure.queue.discovery.PartitionProvider;
import sanbing.jcpp.infrastructure.queue.discovery.event.PartitionChangeEvent;
import sanbing.jcpp.infrastructure.queue.processing.IdMsgPair;
import sanbing.jcpp.infrastructure.queue.provider.AppQueueFactory;
import sanbing.jcpp.infrastructure.stats.StatsFactory;
import sanbing.jcpp.infrastructure.util.annotation.AppComponent;
import sanbing.jcpp.infrastructure.util.codec.ByteUtil;
import sanbing.jcpp.infrastructure.util.mdc.MDCUtils;
import sanbing.jcpp.infrastructure.util.trace.TracerContextUtil;
import sanbing.jcpp.infrastructure.util.trace.TracerRunnable;
import sanbing.jcpp.proto.gen.ProtocolProto.UplinkQueueMessage;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.*;
import java.util.stream.Collectors;
import static sanbing.jcpp.infrastructure.queue.common.QueueConstants.MSG_MD_PREFIX;
import static sanbing.jcpp.infrastructure.queue.common.QueueConstants.MSG_MD_TS;
import static sanbing.jcpp.infrastructure.util.trace.TracerContextUtil.JCPP_TRACER_ID;
import static sanbing.jcpp.infrastructure.util.trace.TracerContextUtil.JCPP_TRACER_ORIGIN;
/**
* @author baigod
*/
@Service
@AppComponent
@Slf4j
public class ProtocolUplinkConsumerService extends AbstractConsumerService implements ApplicationListener<PartitionChangeEvent> {

    // Poll timeout (ms) handed to the queue consumer.
    @Value("${queue.app.poll-interval}")
    private int pollInterval;
    // Max time (ms) to wait for a whole polled pack to be acknowledged.
    @Value("${queue.app.pack-processing-timeout}")
    private long packProcessingTimeout;
    // Whether to run one consumer per partition (vs a single consumer).
    @Value("${queue.app.consumer-per-partition}")
    private boolean consumerPerPartition;
    // Enables per-message stats collection and the scheduled printout.
    @Value("${queue.app.stats.enabled}")
    private boolean statsEnabled;

    private final PileProtocolService pileProtocolService;
    private final AppQueueFactory appQueueFactory;
    private AppQueueConsumerManager<ProtoQueueMsg<UplinkQueueMessage>, AppQueueConfig> appConsumer;
    private final AppConsumerStats stats;

    public ProtocolUplinkConsumerService(PartitionProvider partitionProvider,
                                         ApplicationEventPublisher eventPublisher,
                                         PileProtocolService pileProtocolService,
                                         AppQueueFactory appQueueFactory,
                                         StatsFactory statsFactory) {
        super(partitionProvider, eventPublisher);
        this.pileProtocolService = pileProtocolService;
        this.appQueueFactory = appQueueFactory;
        this.stats = new AppConsumerStats(statsFactory);
    }

    /**
     * Builds the uplink-queue consumer manager after dependency injection.
     * Actual consumption starts once a PartitionChangeEvent delivers partitions.
     */
    @PostConstruct
    public void init() {
        super.init("jcpp-app");
        log.info("Initializing Protocol Uplink Messages Queue Subscriptions.");
        this.appConsumer = AppQueueConsumerManager.<ProtoQueueMsg<UplinkQueueMessage>, AppQueueConfig>builder()
                .queueName("protocol uplink")
                .config(AppQueueConfig.of(consumerPerPartition, pollInterval))
                .msgPackProcessor(this::processMsgs)
                .consumerCreator((config, partitionId) -> appQueueFactory.createProtocolUplinkMsgConsumer())
                .consumerExecutor(consumersExecutor)
                .scheduler(scheduler)
                .taskExecutor(mgmtExecutor)
                .build();
    }

    @PreDestroy
    public void destroy() {
        super.destroy();
    }

    /** Stops base-class consumers, then stops and awaits the uplink consumer manager. */
    @Override
    protected void stopConsumers() {
        super.stopConsumers();
        appConsumer.stop();
        appConsumer.awaitStop();
    }

    /** Periodically prints and resets consumer stats when stats are enabled. */
    @Scheduled(fixedDelayString = "${queue.app.stats.print-interval-ms}")
    public void printStats() {
        if (statsEnabled) {
            stats.printStats();
            stats.reset();
        }
    }

    /**
     * Processes one polled pack of uplink messages.
     * <p>
     * Each message gets a fresh UUID and a PackCallback tied to a shared
     * PackProcessingContext; the pack is dispatched asynchronously on
     * {@code consumersExecutor} while this thread awaits either full
     * acknowledgement or {@code packProcessingTimeout}. On timeout the
     * submitting task is cancelled and the last submitted message is logged.
     * The consumer offset is committed unconditionally afterwards — timed-out
     * or failed messages are logged but NOT redelivered.
     */
    private void processMsgs(List<ProtoQueueMsg<UplinkQueueMessage>> msgs, QueueConsumer<ProtoQueueMsg<UplinkQueueMessage>> consumer, AppQueueConfig config) throws Exception {
        List<IdMsgPair<UplinkQueueMessage>> orderedMsgList = msgs.stream().map(msg -> new IdMsgPair<>(UUID.randomUUID(), msg)).toList();
        ConcurrentMap<UUID, ProtoQueueMsg<UplinkQueueMessage>> pendingMap = orderedMsgList.stream().collect(
                Collectors.toConcurrentMap(IdMsgPair::getUuid, IdMsgPair::getMsg));
        CountDownLatch processingTimeoutLatch = new CountDownLatch(1);
        PackProcessingContext<ProtoQueueMsg<UplinkQueueMessage>> ctx = new PackProcessingContext<>(
                processingTimeoutLatch, pendingMap, new ConcurrentHashMap<>());
        // Tracks the most recently submitted message so a timeout log can name it.
        PendingMsgHolder pendingMsgHolder = new PendingMsgHolder();
        Future<?> packSubmitFuture = consumersExecutor.submit(new TracerRunnable(() ->
                orderedMsgList.forEach((element) -> {
                    UUID id = element.getUuid();
                    ProtoQueueMsg<UplinkQueueMessage> msg = element.getMsg();
                    // Restore tracing context from message headers before dispatching.
                    tracer(msg);
                    log.trace("[{}] Creating main callback for message: {}", id, msg.getValue());
                    Callback callback = new PackCallback<>(id, ctx);
                    try {
                        UplinkQueueMessage uplinkQueueMsg = msg.getValue();
                        pendingMsgHolder.setUplinkQueueMessage(uplinkQueueMsg);
                        // Dispatch on the oneof payload; unknown payloads are acked immediately.
                        if (uplinkQueueMsg.hasLoginRequest()) {
                            pileProtocolService.pileLogin(uplinkQueueMsg, callback);
                        } else if (uplinkQueueMsg.hasHeartBeatRequest()) {
                            pileProtocolService.heartBeat(uplinkQueueMsg, callback);
                        } else if (uplinkQueueMsg.hasVerifyPricingRequest()) {
                            pileProtocolService.verifyPricing(uplinkQueueMsg, callback);
                        } else if (uplinkQueueMsg.hasQueryPricingRequest()) {
                            pileProtocolService.queryPricing(uplinkQueueMsg, callback);
                        } else if (uplinkQueueMsg.hasGunRunStatusProto()) {
                            pileProtocolService.postGunRunStatus(uplinkQueueMsg, callback);
                        } else if (uplinkQueueMsg.hasChargingProgressProto()) {
                            pileProtocolService.postChargingProgress(uplinkQueueMsg, callback);
                        } else if (uplinkQueueMsg.hasSetPricingResponse()) {
                            pileProtocolService.onSetPricingResponse(uplinkQueueMsg, callback);
                        } else if (uplinkQueueMsg.hasRemoteStartChargingResponse()) {
                            pileProtocolService.onRemoteStartChargingResponse(uplinkQueueMsg, callback);
                        } else if (uplinkQueueMsg.hasRemoteStopChargingResponse()) {
                            pileProtocolService.onRemoteStopChargingResponse(uplinkQueueMsg, callback);
                        } else if (uplinkQueueMsg.hasTransactionRecord()) {
                            pileProtocolService.onTransactionRecord(uplinkQueueMsg, callback);
                        } else {
                            callback.onSuccess();
                        }
                        if (statsEnabled) {
                            stats.log(uplinkQueueMsg);
                        }
                    } catch (Throwable e) {
                        log.warn("[{}] Failed to process message: {}", id, msg, e);
                        callback.onFailure(e);
                    }
                }))
        );
        if (!processingTimeoutLatch.await(packProcessingTimeout, TimeUnit.MILLISECONDS)) {
            if (!packSubmitFuture.isDone()) {
                packSubmitFuture.cancel(true);
                UplinkQueueMessage lastSubmitMsg = pendingMsgHolder.getUplinkQueueMessage();
                log.warn("Timeout to process message: {}", lastSubmitMsg);
            }
            if (log.isDebugEnabled()) {
                ctx.getAckMap().forEach((id, msg) -> log.debug("[{}] Timeout to process message: {}", id, msg.getValue()));
            }
            ctx.getFailedMap().forEach((id, msg) -> log.warn("[{}] Failed to process message: {}", id, msg.getValue()));
        }
        consumer.commit();
    }

    /**
     * Re-establishes the tracing context for a message from its queue headers:
     * tracer id (required for continuation), optional origin and timestamp.
     * Falls back to a brand-new tracer when no tracer-id header is present,
     * then records the tracer into the MDC for log correlation.
     * NOTE(review): the mapped Tracer return value is intentionally unused —
     * TracerContextUtil.newTracer presumably registers it as a side effect; confirm.
     */
    private void tracer(ProtoQueueMsg<UplinkQueueMessage> msg) {
        Optional.ofNullable(msg.getHeaders().get(MSG_MD_PREFIX + JCPP_TRACER_ID))
                .map(tracerId -> {
                    String origin = null;
                    byte[] tracerOrigin = msg.getHeaders().get(MSG_MD_PREFIX + JCPP_TRACER_ORIGIN);
                    if (tracerOrigin != null) {
                        origin = ByteUtil.bytesToString(tracerOrigin);
                    }
                    long ts = System.currentTimeMillis();
                    byte[] tracerTs = msg.getHeaders().get(MSG_MD_PREFIX + MSG_MD_TS);
                    if (tracerTs != null) {
                        ts = ByteUtil.bytesToLong(tracerTs);
                    }
                    return TracerContextUtil.newTracer(ByteUtil.bytesToString(tracerId), origin, ts);
                })
                .orElseGet(TracerContextUtil::newTracer);
        MDCUtils.recordTracer();
    }

    /** Management pool size: at least 4, otherwise one per available processor. */
    @Override
    protected int getMgmtThreadPoolSize() {
        return Math.max(Runtime.getRuntime().availableProcessors(), 4);
    }

    /** Forwards cluster partition reassignments to the uplink consumer manager. */
    @Override
    protected void onJCPPApplicationEvent(PartitionChangeEvent event) {
        Set<TopicPartitionInfo> appPartitions = event.getAppPartitions();
        log.info("Subscribing to partitions: {}", appPartitions);
        appConsumer.update(appPartitions);
    }

    // Immutable config for the uplink queue consumer manager.
    @Data(staticConstructor = "of")
    public static class AppQueueConfig implements QueueConfig {
        private final boolean consumerPerPartition;
        private final int pollInterval;
    }

    // Holds the last message handed to the protocol service; volatile because it is
    // written by the submit task and read by the awaiting thread on timeout.
    @Setter
    @Getter
    private static class PendingMsgHolder {
        private volatile UplinkQueueMessage uplinkQueueMessage;
    }
}

View File

@@ -0,0 +1,51 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
抖音关注:程序员三丙
知识星球https://t.zsxq.com/j9b21
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <!-- Versions and dependency management are inherited from the JCPP parent POM. -->
    <parent>
        <groupId>sanbing</groupId>
        <artifactId>jcpp-parent</artifactId>
        <version>1.0.0-SNAPSHOT</version>
        <relativePath>../pom.xml</relativePath>
    </parent>
    <modelVersion>4.0.0</modelVersion>
    <artifactId>jcpp-infrastructure-cache</artifactId>
    <packaging>jar</packaging>
    <name>JChargePointProtocol Infrastructure Cache Module</name>
    <description>基础缓存管理模块</description>
    <properties>
        <main.dir>${basedir}/..</main.dir>
    </properties>
    <dependencies>
        <!-- Redis support: Spring Data Redis + Lettuce client + connection pooling. -->
        <dependency>
            <groupId>org.springframework.data</groupId>
            <artifactId>spring-data-redis</artifactId>
        </dependency>
        <dependency>
            <groupId>io.lettuce</groupId>
            <artifactId>lettuce-core</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-pool2</artifactId>
        </dependency>
        <dependency>
            <groupId>sanbing</groupId>
            <artifactId>jcpp-infrastructure-util</artifactId>
        </dependency>
        <!-- In-process cache backend (used when cache.type=caffeine). -->
        <dependency>
            <groupId>com.github.ben-manes.caffeine</groupId>
            <artifactId>caffeine</artifactId>
        </dependency>
    </dependencies>
</project>

View File

@@ -0,0 +1,13 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
/**
 * Cache name constants shared by the cache configuration and cache users.
 */
public final class CacheConstants {

    /** Name of the pile cache. */
    public static final String PILE_CACHE = "piles";
    /** Name of the pile-session cache. */
    public static final String PILE_SESSION_CACHE = "pileSessions";

    // Constants holder — prevent instantiation (the class was final but still
    // exposed the implicit public constructor).
    private CacheConstants() {
    }
}

View File

@@ -0,0 +1,13 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import lombok.Data;
/**
 * Per-cache tuning values bound from configuration (see CacheSpecsMap).
 */
@Data
public class CacheSpecs {
    // Entry TTL in minutes; 0 means no expiration (see JCPPCaffeineCacheConfiguration).
    private Integer timeToLiveInMinutes;
    // Maximum cache weight (collection values weigh their size, others weigh 1).
    private Integer maxSize;
}

View File

@@ -0,0 +1,22 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import lombok.Data;
import lombok.Getter;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;
import java.util.Map;
/**
 * Binds the {@code cache.specs.*} configuration subtree to a map of
 * cache name → {@link CacheSpecs}.
 */
@Configuration
@ConfigurationProperties(prefix = "cache")
@Data
public class CacheSpecsMap {
    // Cache name → its tuning values. Note: @Data on the class already generates
    // getSpecs()/setSpecs(); the previous field-level @Getter was redundant.
    private Map<String, CacheSpecs> specs;
}

View File

@@ -0,0 +1,15 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
/**
 * A unit of buffered cache puts that either commits atomically with respect to
 * conflicting writes or is rolled back (see CaffeineCacheTransaction).
 */
public interface CacheTransaction<K, V> {
    // Buffers a put; nothing reaches the cache until commit().
    void put(K key, V value);
    // Applies buffered puts; returns false if a conflicting write invalidated the transaction.
    boolean commit();
    // Discards buffered puts and releases the transaction.
    void rollback();
}

View File

@@ -0,0 +1,11 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
/**
 * Wrapper distinguishing "key absent" (null wrapper) from "key present with a
 * value" — the wrapped value itself may still be null.
 */
public interface CacheValueWrapper<T> {
    T get();
}

View File

@@ -0,0 +1,48 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import java.io.Serializable;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
/**
 * Transaction handle over a {@link CaffeineTransactionalCache}: buffers puts in
 * insertion order locally and delegates commit/rollback (keyed by its random id)
 * to the owning cache, which tracks conflicts per key.
 */
@Slf4j
@RequiredArgsConstructor
public class CaffeineCacheTransaction<K extends Serializable, V extends Serializable> implements CacheTransaction<K, V> {

    // Identifies this transaction in the owning cache's bookkeeping maps.
    @Getter
    private final UUID id = UUID.randomUUID();
    private final CaffeineTransactionalCache<K, V> cache;
    // Keys this transaction claimed at creation time (conflict-detection scope).
    @Getter
    private final List<K> keys;
    // Set true by the cache when a conflicting write occurs; checked on commit.
    @Getter
    @Setter
    private boolean failed;
    // Buffered puts, applied only on successful commit; LinkedHashMap keeps put order.
    private final Map<K, V> pendingPuts = new LinkedHashMap<>();

    @Override
    public void put(K key, V value) {
        pendingPuts.put(key, value);
    }

    @Override
    public boolean commit() {
        return cache.commit(id, pendingPuts);
    }

    @Override
    public void rollback() {
        cache.rollback(id);
    }
}

View File

@@ -0,0 +1,180 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
import org.springframework.cache.Cache;
import org.springframework.cache.CacheManager;
import java.io.Serializable;
import java.util.*;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
/**
 * Caffeine-backed cache with optimistic, key-scoped transactions.
 * <p>
 * Every direct write (put / putIfAbsent / evict) marks all open transactions on
 * the affected key as failed; a transaction's commit succeeds only if nothing
 * touched its keys since it was opened, and applies its buffered puts via
 * putIfAbsent. All mutation paths are serialized by a single ReentrantLock.
 */
@RequiredArgsConstructor
public abstract class CaffeineTransactionalCache<K extends Serializable, V extends Serializable> implements TransactionalCache<K, V> {

    @Getter
    protected final String cacheName;
    // Underlying Spring Cache (a CaffeineCache registered in the CacheManager).
    protected final Cache cache;
    // Guards the cache content together with the two transaction-tracking maps.
    protected final Lock lock = new ReentrantLock();
    // key -> ids of open transactions that claimed the key.
    private final Map<K, Set<UUID>> objectTransactions = new HashMap<>();
    // transaction id -> open transaction.
    private final Map<UUID, CaffeineCacheTransaction<K, V>> transactions = new HashMap<>();

    public CaffeineTransactionalCache(CacheManager cacheManager, String cacheName) {
        this.cacheName = cacheName;
        this.cache = Optional.ofNullable(cacheManager.getCache(cacheName))
                .orElseThrow(() -> new IllegalArgumentException("Cache '" + cacheName + "' is not configured"));
    }

    // Read path is lock-free; returns null wrapper when the key is absent.
    @Override
    public CacheValueWrapper<V> get(K key) {
        return SimpleCacheValueWrapper.wrap(cache.get(key));
    }

    // Direct write: fails all transactions on the key, then overwrites.
    @Override
    public void put(K key, V value) {
        lock.lock();
        try {
            failAllTransactionsByKey(key);
            cache.put(key, value);
        } finally {
            lock.unlock();
        }
    }

    @Override
    public void putIfAbsent(K key, V value) {
        lock.lock();
        try {
            failAllTransactionsByKey(key);
            doPutIfAbsent(key, value);
        } finally {
            lock.unlock();
        }
    }

    @Override
    public void evict(K key) {
        lock.lock();
        try {
            failAllTransactionsByKey(key);
            doEvict(key);
        } finally {
            lock.unlock();
        }
    }

    @Override
    public void evict(Collection<K> keys) {
        lock.lock();
        try {
            keys.forEach(key -> {
                failAllTransactionsByKey(key);
                doEvict(key);
            });
        } finally {
            lock.unlock();
        }
    }

    // For Caffeine the "or put" variant degrades to a plain evict.
    @Override
    public void evictOrPut(K key, V value) {
        evict(key);
    }

    @Override
    public CacheTransaction<K, V> newTransactionForKey(K key) {
        return newTransaction(Collections.singletonList(key));
    }

    @Override
    public CacheTransaction<K, V> newTransactionForKeys(List<K> keys) {
        return newTransaction(keys);
    }

    void doPutIfAbsent(K key, V value) {
        cache.putIfAbsent(key, value);
    }

    void doEvict(K key) {
        cache.evict(key);
    }

    // Registers a new transaction for the given keys under the lock.
    CacheTransaction<K, V> newTransaction(List<K> keys) {
        lock.lock();
        try {
            var transaction = new CaffeineCacheTransaction<>(this, keys);
            var transactionId = transaction.getId();
            for (K key : keys) {
                objectTransactions.computeIfAbsent(key, k -> new HashSet<>()).add(transactionId);
            }
            transactions.put(transactionId, transaction);
            return transaction;
        } finally {
            lock.unlock();
        }
    }

    /**
     * Commits transaction {@code trId}: if it was not failed, marks every OTHER
     * open transaction sharing one of its keys as failed, then applies the
     * buffered puts via putIfAbsent (existing values win). Always deregisters
     * the transaction. Returns whether the commit took effect.
     */
    public boolean commit(UUID trId, Map<K, V> pendingPuts) {
        lock.lock();
        try {
            var tr = transactions.get(trId);
            var success = !tr.isFailed();
            if (success) {
                for (K key : tr.getKeys()) {
                    Set<UUID> otherTransactions = objectTransactions.get(key);
                    if (otherTransactions != null) {
                        for (UUID otherTrId : otherTransactions) {
                            // NOTE(review): the trId == null branch is dead — a null trId
                            // would already have NPE'd on tr.isFailed() above.
                            if (trId == null || !trId.equals(otherTrId)) {
                                transactions.get(otherTrId).setFailed(true);
                            }
                        }
                    }
                }
                pendingPuts.forEach(this::doPutIfAbsent);
            }
            removeTransaction(trId);
            return success;
        } finally {
            lock.unlock();
        }
    }

    void rollback(UUID id) {
        lock.lock();
        try {
            removeTransaction(id);
        } finally {
            lock.unlock();
        }
    }

    // Removes the transaction and un-links it from every key it claimed.
    private void removeTransaction(UUID id) {
        CaffeineCacheTransaction<K, V> transaction = transactions.remove(id);
        if (transaction != null) {
            for (var key : transaction.getKeys()) {
                Set<UUID> transactions = objectTransactions.get(key);
                if (transactions != null) {
                    transactions.remove(id);
                    if (transactions.isEmpty()) {
                        objectTransactions.remove(key);
                    }
                }
            }
        }
    }

    // Marks every open transaction on the key as failed (caller holds the lock).
    protected void failAllTransactionsByKey(K key) {
        Set<UUID> transactionsIds = objectTransactions.get(key);
        if (transactionsIds != null) {
            for (UUID otherTrId : transactionsIds) {
                transactions.get(otherTrId).setFailed(true);
            }
        }
    }
}

View File

@@ -0,0 +1,14 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
/**
 * Marker for cache values carrying an optional version number.
 * The setter defaults to a no-op for immutable/unversioned implementations.
 */
public interface HasVersion {
    Integer getVersion();

    default void setVersion(Integer version) {
    }
}

View File

@@ -0,0 +1,83 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.RemovalCause;
import com.github.benmanes.caffeine.cache.Ticker;
import com.github.benmanes.caffeine.cache.Weigher;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.cache.CacheManager;
import org.springframework.cache.annotation.EnableCaching;
import org.springframework.cache.caffeine.CaffeineCache;
import org.springframework.cache.support.SimpleCacheManager;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
 * In-process Caffeine cache setup, active when {@code cache.type=caffeine}
 * (and by default when the property is missing). Builds one CaffeineCache per
 * entry of {@code cache.specs.*}.
 */
@Configuration
@ConditionalOnProperty(prefix = "cache", value = "type", havingValue = "caffeine", matchIfMissing = true)
@EnableCaching
@Slf4j
public class JCPPCaffeineCacheConfiguration {

    private final CacheSpecsMap configuration;

    public JCPPCaffeineCacheConfiguration(CacheSpecsMap configuration) {
        this.configuration = configuration;
    }

    /** Registers a SimpleCacheManager containing one cache per configured spec. */
    @Bean
    public CacheManager cacheManager() {
        log.info("Initializing cache: {} specs {}", Arrays.toString(RemovalCause.values()), configuration.getSpecs());
        SimpleCacheManager manager = new SimpleCacheManager();
        if (configuration.getSpecs() != null) {
            List<CaffeineCache> caches =
                    configuration.getSpecs().entrySet().stream()
                            .map(entry -> buildCache(entry.getKey(),
                                    entry.getValue()))
                            .collect(Collectors.toList());
            manager.setCaches(caches);
        }
        manager.initializeCaches();
        return manager;
    }

    // Builds one cache: weight-bounded by maxSize, TTL applied unless it is 0.
    // NOTE(review): assumes maxSize and timeToLiveInMinutes are non-null in every
    // spec — a missing property would NPE here; confirm against the config schema.
    private CaffeineCache buildCache(String name, CacheSpecs cacheSpec) {
        final Caffeine<Object, Object> caffeineBuilder
                = Caffeine.newBuilder()
                .weigher(collectionSafeWeigher())
                .maximumWeight(cacheSpec.getMaxSize())
                .ticker(ticker());
        if (!cacheSpec.getTimeToLiveInMinutes().equals(0)) {
            caffeineBuilder.expireAfterWrite(cacheSpec.getTimeToLiveInMinutes(), TimeUnit.MINUTES);
        }
        return new CaffeineCache(name, caffeineBuilder.build());
    }

    // Exposed as a bean so tests can substitute a fake ticker.
    @Bean
    public Ticker ticker() {
        return Ticker.systemTicker();
    }

    // Collections weigh their element count so maxSize bounds total elements,
    // not the number of entries; scalar values weigh 1.
    private Weigher<? super Object, ? super Object> collectionSafeWeigher() {
        return (Weigher<Object, Object>) (key, value) -> {
            if (value instanceof Collection) {
                return ((Collection<?>) value).size();
            }
            return 1;
        };
    }
}

View File

@@ -0,0 +1,54 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisClusterConfiguration;
import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;
import org.springframework.data.redis.connection.lettuce.LettucePoolingClientConfiguration;
/**
 * Redis Cluster connection setup, active when {@code cache.type=redis} and
 * {@code redis.connection.type=cluster}.
 * NOTE(review): class name doubles the "JCPP" prefix (matches the sentinel and
 * standalone siblings) — renaming would be a breaking change, flagging only.
 * NOTE(review): unlike the sibling configs this one uses @ConditionalOnExpression
 * rather than @ConditionalOnMissingBean(JCPPCaffeineCacheConfiguration.class);
 * confirm the activation rules are intentionally different.
 */
@Configuration
@ConditionalOnExpression("'${cache.type:null}'=='redis' && '${redis.connection.type:null}'=='cluster'")
@Slf4j
public class JCPPJCPPRedisClusterConfiguration extends JCPPRedisCacheConfiguration {

    // Comma-separated host:port list of cluster nodes.
    @Value("${redis.cluster.nodes:}")
    private String clusterNodes;
    @Value("${redis.cluster.max-redirects:12}")
    private Integer maxRedirects;
    // When true, Lettuce's default pool settings are used instead of buildPoolConfig().
    @Value("${redis.cluster.useDefaultPoolConfig:true}")
    private boolean useDefaultPoolConfig;
    @Value("${redis.password:}")
    private String password;

    /** Builds a Lettuce connection factory for the configured cluster topology. */
    @Override
    public LettuceConnectionFactory loadFactory() {
        log.info("Initializing Redis Cluster on {}", clusterNodes);
        RedisClusterConfiguration clusterConfiguration = new RedisClusterConfiguration();
        clusterConfiguration.setClusterNodes(getNodes(clusterNodes));
        clusterConfiguration.setMaxRedirects(maxRedirects);
        clusterConfiguration.setPassword(password);
        return new LettuceConnectionFactory(clusterConfiguration, buildClientConfig());
    }

    // Pooling client config; custom pool settings only when explicitly requested.
    private LettucePoolingClientConfiguration buildClientConfig() {
        var clientConfigurationBuilder = LettucePoolingClientConfiguration.builder();
        if (!useDefaultPoolConfig) {
            clientConfigurationBuilder
                    .poolConfig(buildPoolConfig());
        }
        return clientConfigurationBuilder
                .build();
    }
}

View File

@@ -0,0 +1,59 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisSentinelConfiguration;
import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;
import org.springframework.data.redis.connection.lettuce.LettucePoolingClientConfiguration;
/**
 * Redis Sentinel connection setup, active when {@code redis.connection.type=sentinel}
 * and no Caffeine cache configuration bean is present.
 * NOTE(review): class name doubles the "JCPP" prefix (matches the cluster and
 * standalone siblings) — renaming would be a breaking change, flagging only.
 */
@Configuration
@ConditionalOnMissingBean(JCPPCaffeineCacheConfiguration.class)
@ConditionalOnProperty(prefix = "redis.connection", value = "type", havingValue = "sentinel")
@Slf4j
public class JCPPJCPPRedisSentinelConfiguration extends JCPPRedisCacheConfiguration {

    // Name of the monitored master set.
    @Value("${redis.sentinel.master:}")
    private String master;
    // Comma-separated host:port list of sentinel nodes.
    @Value("${redis.sentinel.sentinels:}")
    private String sentinels;
    // Password for authenticating with the sentinels themselves.
    @Value("${redis.sentinel.password:}")
    private String sentinelPassword;
    // When true, Lettuce's default pool settings are used instead of buildPoolConfig().
    @Value("${redis.sentinel.useDefaultPoolConfig:true}")
    private boolean useDefaultPoolConfig;
    // Fix: default to 0 like the standalone configuration — the previous empty
    // default ("${redis.db:}") cannot be converted to Integer when the property
    // is unset, failing bean creation.
    @Value("${redis.db:0}")
    private Integer database;
    // Password for the Redis data nodes.
    @Value("${redis.password:}")
    private String password;

    /** Builds a Lettuce connection factory for the configured sentinel topology. */
    @Override
    public LettuceConnectionFactory loadFactory() {
        log.info("Initializing Redis Sentinel on {}, sentinels: {}", master, sentinels);
        RedisSentinelConfiguration redisSentinelConfiguration = new RedisSentinelConfiguration();
        redisSentinelConfiguration.setMaster(master);
        redisSentinelConfiguration.setSentinels(getNodes(sentinels));
        redisSentinelConfiguration.setSentinelPassword(sentinelPassword);
        redisSentinelConfiguration.setPassword(password);
        redisSentinelConfiguration.setDatabase(database);
        return new LettuceConnectionFactory(redisSentinelConfiguration, buildClientConfig());
    }

    // Pooling client config; custom pool settings only when explicitly requested.
    private LettucePoolingClientConfiguration buildClientConfig() {
        var clientConfigurationBuilder = LettucePoolingClientConfiguration.builder();
        if (!useDefaultPoolConfig) {
            clientConfigurationBuilder.poolConfig(buildPoolConfig());
        }
        return clientConfigurationBuilder.build();
    }
}

View File

@@ -0,0 +1,78 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisStandaloneConfiguration;
import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;
import org.springframework.data.redis.connection.lettuce.LettucePoolingClientConfiguration;
import java.time.Duration;
/**
 * Single-node Redis connection setup, active when {@code redis.connection.type=standalone}
 * and no Caffeine cache configuration bean is present.
 * NOTE(review): class name doubles the "JCPP" prefix (matches the cluster and
 * sentinel siblings) — renaming would be a breaking change, flagging only.
 */
@Configuration
@ConditionalOnMissingBean(JCPPCaffeineCacheConfiguration.class)
@ConditionalOnProperty(prefix = "redis.connection", value = "type", havingValue = "standalone")
@Slf4j
public class JCPPJCPPRedisStandaloneConfiguration extends JCPPRedisCacheConfiguration {

    @Value("${redis.standalone.host:localhost}")
    private String host;
    @Value("${redis.standalone.port:6379}")
    private Integer port;
    // Client name reported to the server; only applied when custom config is enabled.
    @Value("${redis.standalone.clientName:standalone}")
    private String clientName;
    // Timeouts in milliseconds; only applied when custom config is enabled.
    @Value("${redis.standalone.commandTimeout:30000}")
    private Long commandTimeout;
    @Value("${redis.standalone.shutdownTimeout:5000}")
    private Long shutdownTimeout;
    // When true, Lettuce's default client settings are used (name/timeouts ignored).
    @Value("${redis.standalone.useDefaultClientConfig:true}")
    private boolean useDefaultClientConfig;
    // Opt-in connection pooling via buildPoolConfig().
    @Value("${redis.standalone.usePoolConfig:false}")
    private boolean usePoolConfig;
    @Value("${redis.db:0}")
    private Integer db;
    @Value("${redis.password:}")
    private String password;

    /** Builds a Lettuce connection factory for a single Redis node. */
    @Override
    public LettuceConnectionFactory loadFactory() {
        log.info("Initializing Redis Standalone on {}:{}", host, port);
        RedisStandaloneConfiguration standaloneConfiguration = new RedisStandaloneConfiguration();
        standaloneConfiguration.setHostName(host);
        standaloneConfiguration.setPort(port);
        standaloneConfiguration.setDatabase(db);
        standaloneConfiguration.setPassword(password);
        return new LettuceConnectionFactory(standaloneConfiguration, buildClientConfig());
    }

    // Client name/timeouts only when useDefaultClientConfig is false; pooling is
    // opt-in independently via usePoolConfig.
    private LettucePoolingClientConfiguration buildClientConfig() {
        var clientConfigurationBuilder = LettucePoolingClientConfiguration.builder();
        if (!useDefaultClientConfig) {
            clientConfigurationBuilder
                    .clientName(clientName)
                    .commandTimeout(Duration.ofMillis(commandTimeout))
                    .shutdownTimeout(Duration.ofMillis(shutdownTimeout));
        }
        if (usePoolConfig) {
            clientConfigurationBuilder.poolConfig(buildPoolConfig());
        }
        return clientConfigurationBuilder.build();
    }
}

View File

@@ -0,0 +1,168 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import io.lettuce.core.api.StatefulRedisConnection;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.cache.CacheManager;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.convert.converter.ConverterRegistry;
import org.springframework.data.redis.cache.RedisCacheConfiguration;
import org.springframework.data.redis.cache.RedisCacheManager;
import org.springframework.data.redis.connection.ReactiveRedisConnectionFactory;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.connection.RedisNode;
import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;
import org.springframework.data.redis.core.ReactiveRedisTemplate;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.GenericJackson2JsonRedisSerializer;
import org.springframework.data.redis.serializer.RedisSerializationContext;
import org.springframework.data.redis.serializer.StringRedisSerializer;
import org.springframework.format.support.DefaultFormattingConversionService;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
@Configuration
@ConditionalOnProperty(prefix = "cache", value = "type", havingValue = "redis")
@Data
@Slf4j
public abstract class JCPPRedisCacheConfiguration {
private static final String COMMA = ",";
private static final String COLON = ":";
@Value("${redis.evictTtlInMs:60000}")
private int evictTtlInMs;
@Value("${redis.pool_config.maxTotal:128}")
private int maxTotal;
@Value("${redis.pool_config.maxIdle:128}")
private int maxIdle;
@Value("${redis.pool_config.minIdle:16}")
private int minIdle;
@Value("${redis.pool_config.testOnBorrow:true}")
private boolean testOnBorrow;
@Value("${redis.pool_config.testOnReturn:true}")
private boolean testOnReturn;
@Value("${redis.pool_config.testWhileIdle:true}")
private boolean testWhileIdle;
@Value("${redis.pool_config.minEvictableMs:60000}")
private long minEvictableMs;
@Value("${redis.pool_config.evictionRunsMs:30000}")
private long evictionRunsMs;
@Value("${redis.pool_config.maxWaitMills:60000}")
private long maxWaitMills;
@Value("${redis.pool_config.numberTestsPerEvictionRun:3}")
private int numberTestsPerEvictionRun;
@Value("${redis.pool_config.blockWhenExhausted:true}")
private boolean blockWhenExhausted;
@Bean
public ReactiveRedisConnectionFactory reactiveRedisConnectionFactory(LettuceConnectionFactory loadFactory) {
return loadFactory;
}
@Bean
public RedisConnectionFactory redisConnectionFactory(LettuceConnectionFactory loadFactory) {
return loadFactory;
}
@Bean
protected abstract LettuceConnectionFactory loadFactory();
@Bean
public CacheManager cacheManager(RedisConnectionFactory redisConnectionFactory) {
DefaultFormattingConversionService redisConversionService = new DefaultFormattingConversionService();
RedisCacheConfiguration.registerDefaultConverters(redisConversionService);
registerDefaultConverters(redisConversionService);
RedisCacheConfiguration configuration = RedisCacheConfiguration.defaultCacheConfig().withConversionService(redisConversionService);
return RedisCacheManager.builder(redisConnectionFactory).cacheDefaults(configuration)
.transactionAware()
.build();
}
@Bean
public ReactiveRedisTemplate<String, Object> reactiveRedisTemplate(ReactiveRedisConnectionFactory reactiveRedisConnectionFactory) {
RedisSerializationContext<String, Object> serializationContext = RedisSerializationContext
.<String, Object>newSerializationContext()
.key(new StringRedisSerializer())
.value(new GenericJackson2JsonRedisSerializer())
.hashKey(new StringRedisSerializer())
.hashValue(new GenericJackson2JsonRedisSerializer())
.build();
return new ReactiveRedisTemplate<>(reactiveRedisConnectionFactory, serializationContext);
}
@Bean
public RedisTemplate<String, Object> redisTemplate(RedisConnectionFactory redisConnectionFactory) {
RedisTemplate<String, Object> template = new RedisTemplate<>();
template.setKeySerializer(new StringRedisSerializer());
template.setValueSerializer(new GenericJackson2JsonRedisSerializer());
template.setHashKeySerializer(new StringRedisSerializer());
template.setHashValueSerializer(new GenericJackson2JsonRedisSerializer());
template.setConnectionFactory(redisConnectionFactory);
return template;
}
private static void registerDefaultConverters(ConverterRegistry registry) {
Assert.notNull(registry, "ConverterRegistry must not be null!");
registry.addConverter(UUID.class, String.class, UUID::toString);
}
protected GenericObjectPoolConfig<StatefulRedisConnection<String, String>> buildPoolConfig() {
GenericObjectPoolConfig<StatefulRedisConnection<String, String>> poolConfig = new GenericObjectPoolConfig<>();
poolConfig.setMaxTotal(maxTotal);
poolConfig.setMaxIdle(maxIdle);
poolConfig.setMinIdle(minIdle);
poolConfig.setTestOnBorrow(testOnBorrow);
poolConfig.setTestOnReturn(testOnReturn);
poolConfig.setTestWhileIdle(testWhileIdle);
poolConfig.setSoftMinEvictableIdleDuration(Duration.ofMillis(minEvictableMs));
poolConfig.setTimeBetweenEvictionRuns(Duration.ofMillis(evictionRunsMs));
poolConfig.setMaxWait(Duration.ofMillis(maxWaitMills));
poolConfig.setNumTestsPerEvictionRun(numberTestsPerEvictionRun);
poolConfig.setBlockWhenExhausted(blockWhenExhausted);
return poolConfig;
}
protected List<RedisNode> getNodes(String nodes) {
List<RedisNode> result;
if (!StringUtils.hasText(nodes)) {
result = Collections.emptyList();
} else {
result = new ArrayList<>();
for (String hostPort : nodes.split(COMMA)) {
String host = hostPort.split(COLON)[0];
int port = Integer.parseInt(hostPort.split(COLON)[1]);
result.add(new RedisNode(host, port));
}
}
return result;
}
}

View File

@@ -0,0 +1,18 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import org.springframework.data.redis.serializer.SerializationException;
import org.springframework.lang.Nullable;
/**
 * Serialization contract for values stored in the Redis transactional caches.
 * The key is passed to {@link #deserialize} so implementations can use
 * key-dependent decoding if they need to.
 */
public interface JCPPRedisSerializer<K, T> {

    /** Serializes a value to raw bytes; may return null for a null input. */
    @Nullable
    byte[] serialize(@Nullable T t) throws SerializationException;

    /** Deserializes raw bytes back into a value; may return null for null/empty input. */
    @Nullable
    T deserialize(K key, @Nullable byte[] bytes) throws SerializationException;
}

View File

@@ -0,0 +1,45 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.redis.connection.RedisConnection;
import java.io.Serializable;
import java.util.Objects;
/**
 * A cache transaction bound to a single Redis connection on which
 * WATCH/MULTI have already been issued (see RedisTransactionalCache#watch).
 * The connection is always closed when the transaction ends.
 */
@Slf4j
@RequiredArgsConstructor
public class RedisCacheTransaction<K extends Serializable, V extends Serializable> implements CacheTransaction<K, V> {

    private final RedisTransactionalCache<K, V> cache;
    private final RedisConnection connection;

    /** Queues a SET on the transactional connection; executed on commit(). */
    @Override
    public void put(K key, V value) {
        cache.put(key, value, connection);
    }

    /**
     * Executes the queued commands with EXEC.
     *
     * @return true when at least one command produced a non-null reply;
     *         false when the transaction was aborted (e.g. a WATCHed key changed)
     */
    @Override
    public boolean commit() {
        try {
            var execResult = connection.exec();
            // exec() may yield null when the transaction is aborted by a WATCH
            // conflict; treat that as a failed commit instead of throwing NPE.
            return execResult != null && execResult.stream().anyMatch(Objects::nonNull);
        } finally {
            connection.close();
        }
    }

    /** Discards the queued commands (DISCARD) and releases the connection. */
    @Override
    public void rollback() {
        try {
            connection.discard();
        } finally {
            connection.close();
        }
    }
}

View File

@@ -0,0 +1,246 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import io.lettuce.core.RedisAsyncCommandsImpl;
import io.lettuce.core.RedisClient;
import io.lettuce.core.cluster.RedisAdvancedClusterAsyncCommandsImpl;
import io.lettuce.core.cluster.api.async.RedisClusterAsyncCommands;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.springframework.cache.support.NullValue;
import org.springframework.data.redis.connection.RedisClusterNode;
import org.springframework.data.redis.connection.RedisConnection;
import org.springframework.data.redis.connection.RedisStringCommands;
import org.springframework.data.redis.connection.lettuce.LettuceConnection;
import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;
import org.springframework.data.redis.core.types.Expiration;
import org.springframework.data.redis.serializer.RedisSerializer;
import org.springframework.data.redis.serializer.StringRedisSerializer;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Supplier;
/**
 * Redis-backed implementation of {@link TransactionalCache} using Lettuce.
 * Keys are serialized as "cacheName + key.toString()" UTF-8 strings; values go
 * through a pluggable {@link JCPPRedisSerializer}. Cached nulls are stored as a
 * dedicated sentinel byte array so a miss can be distinguished from a cached null.
 * All operations are no-ops when the cache is disabled via CacheSpecs.
 */
@Slf4j
public abstract class RedisTransactionalCache<K extends Serializable, V extends Serializable> implements TransactionalCache<K, V> {

    // Sentinel bytes representing a cached null value (distinct from a miss).
    static final byte[] BINARY_NULL_VALUE = RedisSerializer.java().serialize(NullValue.INSTANCE);

    @Getter
    private final String cacheName;

    @Getter
    private final LettuceConnectionFactory connectionFactory;

    private final RedisSerializer<String> keySerializer = StringRedisSerializer.UTF_8;
    private final JCPPRedisSerializer<K, V> valueSerializer;

    // TTL used for the sentinel written by evictOrPut when the key was absent.
    protected final Expiration evictExpiration;
    // TTL for regular entries; Expiration.persistent() when no TTL is configured.
    protected final Expiration cacheTtl;
    // False when there is no spec for this cache name or its maxSize is <= 0.
    protected final boolean cacheEnabled;

    /**
     * @param cacheName     logical cache name, also used as the key prefix
     * @param cacheSpecsMap per-cache TTL / size specs (nullable)
     * @param configuration source of the evict TTL setting
     */
    public RedisTransactionalCache(String cacheName,
                                   CacheSpecsMap cacheSpecsMap,
                                   LettuceConnectionFactory connectionFactory,
                                   JCPPRedisCacheConfiguration configuration,
                                   JCPPRedisSerializer<K, V> valueSerializer) {
        this.cacheName = cacheName;
        this.connectionFactory = connectionFactory;
        this.valueSerializer = valueSerializer;
        this.evictExpiration = Expiration.from(configuration.getEvictTtlInMs(), TimeUnit.MILLISECONDS);
        // TTL 0 in the spec means "no expiration" -> persistent.
        this.cacheTtl = Optional.ofNullable(cacheSpecsMap)
                .map(CacheSpecsMap::getSpecs)
                .map(specs -> specs.get(cacheName))
                .map(CacheSpecs::getTimeToLiveInMinutes)
                .filter(ttl -> !ttl.equals(0))
                .map(ttl -> Expiration.from(ttl, TimeUnit.MINUTES))
                .orElseGet(Expiration::persistent);
        this.cacheEnabled = Optional.ofNullable(cacheSpecsMap)
                .map(CacheSpecsMap::getSpecs)
                .map(x -> x.get(cacheName))
                .map(CacheSpecs::getMaxSize)
                .map(size -> size > 0)
                .orElse(false);
    }

    /**
     * @return null on miss, an empty wrapper for a cached null,
     *         or a wrapper around the deserialized value
     */
    @Override
    public CacheValueWrapper<V> get(K key) {
        if (!cacheEnabled) {
            return null;
        }
        try (var connection = connectionFactory.getConnection()) {
            byte[] rawValue = doGet(key, connection);
            if (rawValue == null || rawValue.length == 0) {
                return null;
            } else if (Arrays.equals(rawValue, BINARY_NULL_VALUE)) {
                return SimpleCacheValueWrapper.empty();
            } else {
                V value = valueSerializer.deserialize(key, rawValue);
                return SimpleCacheValueWrapper.wrap(value);
            }
        }
    }

    // Raw GET; overridden by versioned caches to strip the version prefix.
    protected byte[] doGet(K key, RedisConnection connection) {
        return connection.stringCommands().get(getRawKey(key));
    }

    /** Unconditional SET (UPSERT) on a fresh connection. */
    @Override
    public void put(K key, V value) {
        if (!cacheEnabled) {
            return;
        }
        try (var connection = connectionFactory.getConnection()) {
            put(key, value, connection);
        }
    }

    /** Unconditional SET on a caller-provided (possibly transactional) connection. */
    public void put(K key, V value, RedisConnection connection) {
        put(connection, key, value, RedisStringCommands.SetOption.UPSERT);
    }

    /** SET NX semantics: only writes when the key is absent. */
    @Override
    public void putIfAbsent(K key, V value) {
        if (!cacheEnabled) {
            return;
        }
        try (var connection = connectionFactory.getConnection()) {
            put(connection, key, value, RedisStringCommands.SetOption.SET_IF_ABSENT);
        }
    }

    /** Deletes a single key. */
    @Override
    public void evict(K key) {
        if (!cacheEnabled) {
            return;
        }
        try (var connection = connectionFactory.getConnection()) {
            connection.keyCommands().del(getRawKey(key));
        }
    }

    /** Deletes several keys in one DEL call; no-op for an empty collection. */
    @Override
    public void evict(Collection<K> keys) {
        if (!cacheEnabled) {
            return;
        }
        if (keys.isEmpty()) {
            return;
        }
        try (var connection = connectionFactory.getConnection()) {
            connection.keyCommands().del(keys.stream().map(this::getRawKey).toArray(byte[][]::new));
        }
    }

    /**
     * Deletes the key; if it was already absent, writes the value with the short
     * evict TTL instead, so a concurrent "get" transaction is not left stale.
     */
    @Override
    public void evictOrPut(K key, V value) {
        if (!cacheEnabled) {
            return;
        }
        try (var connection = connectionFactory.getConnection()) {
            var rawKey = getRawKey(key);
            var records = connection.keyCommands().del(rawKey);
            if (records == null || records == 0) {
                //We need to put the value in case of Redis, because evict will NOT cancel concurrent transaction used to "get" the missing value from cache.
                connection.stringCommands().set(rawKey, getRawValue(value), evictExpiration, RedisStringCommands.SetOption.UPSERT);
            }
        }
    }

    /** Starts a WATCH/MULTI transaction guarding a single key. */
    @Override
    public CacheTransaction<K, V> newTransactionForKey(K key) {
        byte[][] rawKey = new byte[][]{getRawKey(key)};
        RedisConnection connection = watch(rawKey);
        return new RedisCacheTransaction<>(this, connection);
    }

    /** Starts a WATCH/MULTI transaction guarding several keys. */
    @Override
    public CacheTransaction<K, V> newTransactionForKeys(List<K> keys) {
        RedisConnection connection = watch(keys.stream().map(this::getRawKey).toArray(byte[][]::new));
        return new RedisCacheTransaction<>(this, connection);
    }

    /** Short-circuits straight to the DB when the cache is disabled. */
    @Override
    public <R> R getAndPutInTransaction(K key, Supplier<R> dbCall, Function<V, R> cacheValueToResult, Function<R, V> dbValueToCacheValue, boolean cacheNullValue) {
        if (!cacheEnabled) {
            return dbCall.get();
        }
        return TransactionalCache.super.getAndPutInTransaction(key, dbCall, cacheValueToResult, dbValueToCacheValue, cacheNullValue);
    }

    /**
     * Returns a connection suitable for WATCH: in cluster mode, a connection pinned
     * to the node that owns rawKey (WATCH must run on the owning node).
     * NOTE(review): the cluster branch builds a LettuceConnection around a brand-new
     * RedisClient.create() on every call and never shuts that client down — this
     * looks like a resource leak; confirm the intended client lifecycle.
     */
    @SuppressWarnings("unchecked")
    protected RedisConnection getConnection(byte[] rawKey) {
        if (!connectionFactory.isClusterAware()) {
            return connectionFactory.getConnection();
        }
        RedisClusterNode redisClusterNode = connectionFactory.getClusterConnection().clusterGetNodeForKey(rawKey);
        Object nativeConnection = connectionFactory.getConnection().getNativeConnection();
        RedisClusterAsyncCommands<?,?> connection = ((RedisAdvancedClusterAsyncCommandsImpl<?,?>) nativeConnection).getConnection(redisClusterNode.getId());
        LettuceConnection lettuceConnection = new LettuceConnection(((RedisAsyncCommandsImpl) connection).getStatefulConnection(),
                connectionFactory.getTimeout(),
                RedisClient.create());
        lettuceConnection.setConvertPipelineAndTxResults(connectionFactory.getConvertPipelineAndTxResults());
        return lettuceConnection;
    }

    /**
     * Issues WATCH on all keys followed by MULTI; closes the connection if either
     * command fails so it is never leaked.
     */
    protected RedisConnection watch(byte[][] rawKeysList) {
        RedisConnection connection = getConnection(rawKeysList[0]);
        try {
            connection.watch(rawKeysList);
            connection.multi();
        } catch (Exception e) {
            connection.close();
            throw e;
        }
        return connection;
    }

    /** Builds the raw key bytes: UTF-8 of cacheName concatenated with key.toString(). */
    protected byte[] getRawKey(K key) {
        String keyString = cacheName + key.toString();
        byte[] rawKey;
        try {
            rawKey = keySerializer.serialize(keyString);
        } catch (Exception e) {
            log.warn("Failed to serialize the cache key: {}", key, e);
            throw new RuntimeException(e);
        }
        if (rawKey == null) {
            log.warn("Failed to serialize the cache key: {}", key);
            throw new IllegalArgumentException("Failed to serialize the cache key!");
        }
        return rawKey;
    }

    /** Serializes a value; null maps to the BINARY_NULL_VALUE sentinel. */
    protected byte[] getRawValue(V value) {
        if (value == null) {
            return BINARY_NULL_VALUE;
        } else {
            try {
                return valueSerializer.serialize(value);
            } catch (Exception e) {
                log.warn("Failed to serialize the cache value: {}", value, e);
                throw new RuntimeException(e);
            }
        }
    }

    /** SET with the given option on the provided connection; no-op when disabled. */
    public void put(RedisConnection connection, K key, V value, RedisStringCommands.SetOption setOption) {
        if (!cacheEnabled) {
            return;
        }
        byte[] rawKey = getRawKey(key);
        put(connection, rawKey, value, setOption);
    }

    // Lowest-level write: raw key/value with the configured cache TTL.
    public void put(RedisConnection connection, byte[] rawKey, V value, RedisStringCommands.SetOption setOption) {
        byte[] rawValue = getRawValue(value);
        connection.stringCommands().set(rawKey, rawValue, this.cacheTtl, setOption);
    }
}

View File

@@ -0,0 +1,35 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import lombok.AccessLevel;
import lombok.RequiredArgsConstructor;
import lombok.ToString;
import org.springframework.cache.Cache;
@ToString
@RequiredArgsConstructor(access = AccessLevel.PRIVATE)
public class SimpleCacheValueWrapper<T> implements CacheValueWrapper<T> {
private final T value;
@Override
public T get() {
return value;
}
public static <T> SimpleCacheValueWrapper<T> empty() {
return new SimpleCacheValueWrapper<>(null);
}
public static <T> SimpleCacheValueWrapper<T> wrap(T value) {
return new SimpleCacheValueWrapper<>(value);
}
@SuppressWarnings("unchecked")
public static <T> SimpleCacheValueWrapper<T> wrap(Cache.ValueWrapper source) {
return source == null ? null : new SimpleCacheValueWrapper<>((T) source.get());
}
}

View File

@@ -0,0 +1,86 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import java.io.Serializable;
import java.util.Collection;
import java.util.List;
import java.util.function.Function;
import java.util.function.Supplier;
/**
 * Cache abstraction with optimistic, key-scoped transactions: a miss can be
 * resolved from the database and written back atomically, so a concurrent
 * eviction aborts the write instead of leaving a stale entry.
 */
public interface TransactionalCache<K extends Serializable, V extends Serializable> {

    /** Logical cache name (also used as the key prefix by implementations). */
    String getCacheName();

    /**
     * @return null on a miss; otherwise a wrapper whose get() may itself
     *         return null when a null value was cached
     */
    CacheValueWrapper<V> get(K key);

    void put(K key, V value);

    void putIfAbsent(K key, V value);

    void evict(K key);

    void evict(Collection<K> keys);

    /** Evicts the key, or (implementation-specific) writes the value if it was absent. */
    void evictOrPut(K key, V value);

    /** Opens a transaction guarding a single key; must be committed or rolled back. */
    CacheTransaction<K, V> newTransactionForKey(K key);

    /** Opens a transaction guarding several keys; must be committed or rolled back. */
    CacheTransaction<K, V> newTransactionForKeys(List<K> keys);

    /**
     * Cache-aside read. With putToCache=true the DB result is written back inside
     * a transaction; otherwise the DB is consulted without updating the cache.
     */
    default V getOrFetchFromDB(K key, Supplier<V> dbCall, boolean cacheNullValue, boolean putToCache) {
        if (putToCache) {
            return getAndPutInTransaction(key, dbCall, cacheNullValue);
        } else {
            CacheValueWrapper<V> cacheValueWrapper = get(key);
            if (cacheValueWrapper != null) {
                return cacheValueWrapper.get();
            }
            return dbCall.get();
        }
    }

    /** Convenience overload where cache value and DB value are the same type. */
    default V getAndPutInTransaction(K key, Supplier<V> dbCall, boolean cacheNullValue) {
        return getAndPutInTransaction(key, dbCall, Function.identity(), Function.identity(), cacheNullValue);
    }

    /**
     * Core cache-aside flow: check the cache first; on a miss, open a transaction
     * for the key, fetch from the DB, then cache-and-commit. When the DB returns
     * null and cacheNullValue is false, the transaction is rolled back and null
     * is returned without caching. Any failure also rolls back.
     * Note: on a hit whose cached value is null, null is returned directly
     * (cacheValueToResult is not applied to null).
     */
    default <R> R getAndPutInTransaction(K key, Supplier<R> dbCall, Function<V, R> cacheValueToResult, Function<R, V> dbValueToCacheValue, boolean cacheNullValue) {
        CacheValueWrapper<V> cacheValueWrapper = get(key);
        if (cacheValueWrapper != null) {
            V cacheValue = cacheValueWrapper.get();
            return cacheValue != null ? cacheValueToResult.apply(cacheValue) : null;
        }
        var cacheTransaction = newTransactionForKey(key);
        try {
            R dbValue = dbCall.get();
            if (dbValue != null || cacheNullValue) {
                cacheTransaction.put(key, dbValueToCacheValue.apply(dbValue));
                cacheTransaction.commit();
                return dbValue;
            } else {
                cacheTransaction.rollback();
                return null;
            }
        } catch (Throwable e) {
            cacheTransaction.rollback();
            throw e;
        }
    }

    /** Mapping variant of getOrFetchFromDB with distinct cache and result types. */
    default <R> R getOrFetchFromDB(K key, Supplier<R> dbCall, Function<V, R> cacheValueToResult, Function<R, V> dbValueToCacheValue, boolean cacheNullValue, boolean putToCache) {
        if (putToCache) {
            return getAndPutInTransaction(key, dbCall, cacheValueToResult, dbValueToCacheValue, cacheNullValue);
        } else {
            CacheValueWrapper<V> cacheValueWrapper = get(key);
            if (cacheValueWrapper != null) {
                var cacheValue = cacheValueWrapper.get();
                return cacheValue == null ? null : cacheValueToResult.apply(cacheValue);
            }
            return dbCall.get();
        }
    }
}

View File

@@ -0,0 +1,51 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import java.io.Serializable;
import java.util.Collection;
import java.util.Optional;
import java.util.function.Supplier;
/**
 * TransactionalCache variant for values carrying a version number: writes only
 * take effect when the incoming version is newer than the stored one, and
 * eviction is expressed as a versioned tombstone write.
 */
public interface VersionedCache<K extends VersionedCacheKey, V extends Serializable & HasVersion> extends TransactionalCache<K, V> {

    CacheValueWrapper<V> get(K key);

    /** Read-through get that caches the supplied value on a miss. */
    default V get(K key, Supplier<V> supplier) {
        return get(key, supplier, true);
    }

    /** Read-through get; when putToCache is false the supplied value is not cached. */
    default V get(K key, Supplier<V> supplier, boolean putToCache) {
        return Optional.ofNullable(get(key))
                .map(CacheValueWrapper::get)
                .orElseGet(() -> {
                    V value = supplier.get();
                    if (putToCache) {
                        put(key, value);
                    }
                    return value;
                });
    }

    void put(K key, V value);

    void evict(K key);

    void evict(Collection<K> keys);

    /** Versioned eviction: only removes/overwrites entries older than the given version. */
    void evict(K key, Integer version);

    /**
     * Version extraction used by put/evict:
     * null value -> 0 (a versionless tombstone),
     * value with a version -> that version,
     * value without a version -> null (callers skip caching in that case).
     */
    default Integer getVersion(V value) {
        if (value == null) {
            return 0;
        } else if (value.getVersion() != null) {
            return value.getVersion();
        } else {
            return null;
        }
    }
}

View File

@@ -0,0 +1,15 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import java.io.Serializable;
/**
 * Marker for cache keys that may opt into versioned storage.
 * Keys returning false fall back to the plain, unversioned cache path.
 */
public interface VersionedCacheKey extends Serializable {

    /** Whether entries under this key carry a version prefix; defaults to false. */
    default boolean isVersioned() {
        return false;
    }
}

View File

@@ -0,0 +1,86 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import org.springframework.cache.Cache;
import org.springframework.cache.CacheManager;
import sanbing.jcpp.infrastructure.util.JCPPPair;
import java.io.Serializable;
/**
 * Caffeine-backed versioned cache: each entry is stored as a (version, value)
 * pair and a put/evict only wins when its version is strictly newer than the
 * stored one. Eviction with a version writes a (version, null) tombstone.
 */
public abstract class VersionedCaffeineCache<K extends VersionedCacheKey, V extends Serializable & HasVersion> extends CaffeineTransactionalCache<K, V> implements VersionedCache<K, V> {

    public VersionedCaffeineCache(CacheManager cacheManager, String cacheName) {
        super(cacheManager, cacheName);
    }

    /** @return wrapper around the stored value (possibly a null tombstone), or null on miss */
    @Override
    public CacheValueWrapper<V> get(K key) {
        JCPPPair<Integer, V> versionValuePair = doGet(key);
        if (versionValuePair != null) {
            return SimpleCacheValueWrapper.wrap(versionValuePair.getSecond());
        }
        return null;
    }

    /** Stores the value under its own version; values without a version are skipped. */
    @Override
    public void put(K key, V value) {
        Integer version = getVersion(value);
        if (version == null) {
            return;
        }
        doPut(key, value, version);
    }

    /**
     * Writes (version, value) only when the key is absent, the stored version is
     * unknown, or the new version is strictly newer; pending transactions on the
     * key are failed before the overwrite.
     */
    private void doPut(K key, V value, Integer version) {
        lock.lock();
        try {
            JCPPPair<Integer, V> versionValuePair = doGet(key);
            // Bug fix: the pair stores an Integer version (see wrapValue), so it must
            // be read as Integer — reading it as Long caused a ClassCastException on
            // every overwrite. A null stored version is treated as "older".
            if (versionValuePair == null || versionValuePair.getFirst() == null || version > versionValuePair.getFirst()) {
                failAllTransactionsByKey(key);
                cache.put(key, wrapValue(value, version));
            }
        } finally {
            lock.unlock();
        }
    }

    // Reads the stored (version, value) pair; null on miss or foreign entry type.
    @SuppressWarnings("unchecked")
    private JCPPPair<Integer, V> doGet(K key) {
        Cache.ValueWrapper source = cache.get(key);
        if (source != null && source.get() instanceof JCPPPair pair) {
            return pair;
        }
        return null;
    }

    /** Unconditional eviction; fails pending transactions on the key first. */
    @Override
    public void evict(K key) {
        lock.lock();
        try {
            failAllTransactionsByKey(key);
            cache.evict(key);
        } finally {
            lock.unlock();
        }
    }

    /** Versioned eviction: stores a (version, null) tombstone if the version is newer. */
    @Override
    public void evict(K key, Integer version) {
        if (version == null) {
            return;
        }
        doPut(key, null, version);
    }

    @Override
    void doPutIfAbsent(K key, V value) {
        cache.putIfAbsent(key, wrapValue(value, getVersion(value)));
    }

    // Pairs a value with its version for storage.
    private JCPPPair<Integer, V> wrapValue(V value, Integer version) {
        return JCPPPair.of(version, value);
    }
}

View File

@@ -0,0 +1,155 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.cache;
import jakarta.annotation.PostConstruct;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.NotImplementedException;
import org.springframework.dao.InvalidDataAccessApiUsageException;
import org.springframework.data.redis.connection.RedisConnection;
import org.springframework.data.redis.connection.ReturnType;
import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;
import org.springframework.data.redis.core.types.Expiration;
import org.springframework.data.redis.serializer.StringRedisSerializer;
import java.io.Serializable;
import java.util.Arrays;
/**
 * Redis-backed versioned cache. Each versioned entry is stored as an 8-byte
 * big-endian version prefix followed by the serialized value; a Lua script
 * performs the compare-and-set so only strictly newer versions overwrite.
 * Unversioned keys (key.isVersioned() == false) fall back to the plain
 * RedisTransactionalCache behavior.
 */
@Slf4j
public abstract class VersionedRedisCache<K extends VersionedCacheKey, V extends Serializable & HasVersion> extends RedisTransactionalCache<K, V> implements VersionedCache<K, V> {

    // Width of the big-endian version prefix, in bytes.
    private static final int VERSION_SIZE = 8;
    // GETRANGE end index meaning "to the end of the string".
    private static final int VALUE_END_OFFSET = -1;

    // Lua CAS: write (version || value) only when the new version is newer than
    // the stored prefix, or when the key is absent / has no readable version.
    static final byte[] SET_VERSIONED_VALUE_LUA_SCRIPT = StringRedisSerializer.UTF_8.serialize("""
            local key = KEYS[1]
            local newValue = ARGV[1]
            local newVersion = tonumber(ARGV[2])
            local expiration = tonumber(ARGV[3])
            local function setNewValue()
            local newValueWithVersion = struct.pack(">I8", newVersion) .. newValue
            redis.call('SET', key, newValueWithVersion, 'EX', expiration)
            end
            -- Get the current version (first 8 bytes) of the current value
            local currentVersionBytes = redis.call('GETRANGE', key, 0, 7)
            if currentVersionBytes and #currentVersionBytes == 8 then
            local currentVersion = struct.unpack(">I8", currentVersionBytes)
            if newVersion > currentVersion then
            setNewValue()
            end
            else
            -- If the current value is absent or the current version is not found, set the new value
            setNewValue()
            end
            """);
    // Precomputed SHA1 of the script above, used for EVALSHA fast path.
    static final byte[] SET_VERSIONED_VALUE_SHA = StringRedisSerializer.UTF_8.serialize("0453cb1814135b706b4198b09a09f43c9f67bbfe");

    public VersionedRedisCache(String cacheName, CacheSpecsMap cacheSpecsMap, LettuceConnectionFactory connectionFactory, JCPPRedisCacheConfiguration configuration, JCPPRedisSerializer<K, V> valueSerializer) {
        super(cacheName, cacheSpecsMap, connectionFactory, configuration, valueSerializer);
    }

    /**
     * Preloads the Lua script into Redis so later EVALSHA calls hit; logs (but
     * does not fail startup) if loading fails or the SHA does not match.
     */
    @PostConstruct
    public void init() {
        try (var connection = getConnection(SET_VERSIONED_VALUE_SHA)) {
            log.debug("Loading LUA with expected SHA[{}], connection [{}]", new String(SET_VERSIONED_VALUE_SHA), connection.getNativeConnection());
            String sha = connection.scriptingCommands().scriptLoad(SET_VERSIONED_VALUE_LUA_SCRIPT);
            if (!Arrays.equals(SET_VERSIONED_VALUE_SHA, StringRedisSerializer.UTF_8.serialize(sha))) {
                log.error("SHA for SET_VERSIONED_VALUE_LUA_SCRIPT wrong! Expected [{}], but actual [{}], connection [{}]", new String(SET_VERSIONED_VALUE_SHA), sha, connection.getNativeConnection());
            }
        } catch (Throwable t) {
            log.error("Error on Redis versioned cache init", t);
        }
    }

    /** For versioned keys, reads only the value bytes after the 8-byte version prefix. */
    @Override
    protected byte[] doGet(K key, RedisConnection connection) {
        if (!key.isVersioned()) {
            return super.doGet(key, connection);
        }
        byte[] rawKey = getRawKey(key);
        return connection.stringCommands().getRange(rawKey, VERSION_SIZE, VALUE_END_OFFSET);
    }

    /** Versioned CAS put; values without a version are skipped. */
    @Override
    public void put(K key, V value) {
        if (!key.isVersioned()) {
            super.put(key, value);
            return;
        }
        Integer version = getVersion(value);
        if (version == null) {
            return;
        }
        doPut(key, value, version, cacheTtl);
    }

    /** Transactional-connection variant of the versioned put. */
    @Override
    public void put(K key, V value, RedisConnection connection) {
        if (!key.isVersioned()) {
            super.put(key, value, connection); // because scripting commands are not supported in transaction mode
            return;
        }
        Integer version = getVersion(value);
        if (version == null) {
            return;
        }
        byte[] rawKey = getRawKey(key);
        doPut(rawKey, value, version, cacheTtl, connection);
    }

    // Opens a node-pinned connection and runs the versioned CAS write.
    private void doPut(K key, V value, Integer version, Expiration expiration) {
        if (!cacheEnabled) {
            return;
        }
        log.trace("put [{}][{}][{}]", key, value, version);
        final byte[] rawKey = getRawKey(key);
        try (var connection = getConnection(rawKey)) {
            doPut(rawKey, value, version, expiration, connection);
        }
    }

    /**
     * Runs the CAS script: EVALSHA first; on a missing-script error, SCRIPT LOAD
     * and retry EVALSHA; as a last resort fall back to plain EVAL.
     * NOTE(review): the script always issues SET ... 'EX' <seconds>; when the
     * expiration is Expiration.persistent() (0 seconds) Redis rejects EX 0 —
     * confirm persistent TTLs are handled for versioned caches.
     */
    private void doPut(byte[] rawKey, V value, Integer version, Expiration expiration, RedisConnection connection) {
        byte[] rawValue = getRawValue(value);
        byte[] rawVersion = StringRedisSerializer.UTF_8.serialize(String.valueOf(version));
        byte[] rawExpiration = StringRedisSerializer.UTF_8.serialize(String.valueOf(expiration.getExpirationTimeInSeconds()));
        try {
            connection.scriptingCommands().evalSha(SET_VERSIONED_VALUE_SHA, ReturnType.VALUE, 1, rawKey, rawValue, rawVersion, rawExpiration);
        } catch (InvalidDataAccessApiUsageException e) {
            log.debug("loading LUA [{}]", connection.getNativeConnection());
            String sha = connection.scriptingCommands().scriptLoad(SET_VERSIONED_VALUE_LUA_SCRIPT);
            if (!Arrays.equals(SET_VERSIONED_VALUE_SHA, StringRedisSerializer.UTF_8.serialize(sha))) {
                log.error("SHA for SET_VERSIONED_VALUE_LUA_SCRIPT wrong! Expected [{}], but actual [{}]", new String(SET_VERSIONED_VALUE_SHA), sha);
            }
            try {
                connection.scriptingCommands().evalSha(SET_VERSIONED_VALUE_SHA, ReturnType.VALUE, 1, rawKey, rawValue, rawVersion, rawExpiration);
            } catch (InvalidDataAccessApiUsageException ignored) {
                log.debug("Slowly executing eval instead of fast evalsha");
                connection.scriptingCommands().eval(SET_VERSIONED_VALUE_LUA_SCRIPT, ReturnType.VALUE, 1, rawKey, rawValue, rawVersion, rawExpiration);
            }
        }
    }

    /** Versioned eviction: writes a null tombstone with the short evict TTL. */
    @Override
    public void evict(K key, Integer version) {
        log.trace("evict [{}][{}]", key, version);
        if (version != null) {
            doPut(key, null, version, evictExpiration);
        }
    }

    /** Not meaningful under versioned CAS semantics. */
    @Override
    public void putIfAbsent(K key, V value) {
        throw new NotImplementedException("putIfAbsent is not supported by versioned cache");
    }

    /** Not meaningful under versioned CAS semantics. */
    @Override
    public void evictOrPut(K key, V value) {
        throw new NotImplementedException("evictOrPut is not supported by versioned cache");
    }
}

View File

@@ -0,0 +1,50 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
抖音关注:程序员三丙
知识星球https://t.zsxq.com/j9b21
-->
<!-- Maven module: Protobuf message definitions shared by the protocol stack. -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <groupId>sanbing</groupId>
        <artifactId>jcpp-parent</artifactId>
        <version>1.0.0-SNAPSHOT</version>
        <relativePath>../pom.xml</relativePath>
    </parent>
    <modelVersion>4.0.0</modelVersion>
    <artifactId>jcpp-infrastructure-proto</artifactId>
    <packaging>jar</packaging>
    <name>JChargePointProtocol Infrastructure Proto Module</name>
    <description>基础Protobuf模块</description>
    <properties>
        <main.dir>${basedir}/..</main.dir>
    </properties>
    <!-- Versions are managed by the parent POM; only artifacts are listed here. -->
    <dependencies>
        <dependency>
            <groupId>com.google.protobuf</groupId>
            <artifactId>protobuf-java</artifactId>
        </dependency>
        <dependency>
            <groupId>com.google.protobuf</groupId>
            <artifactId>protobuf-java-util</artifactId>
        </dependency>
    </dependencies>
    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-dependency-plugin</artifactId>
            </plugin>
            <!-- Generates Java sources from the .proto files at build time. -->
            <plugin>
                <groupId>org.xolstice.maven.plugins</groupId>
                <artifactId>protobuf-maven-plugin</artifactId>
            </plugin>
        </plugins>
    </build>
</project>

View File

@@ -0,0 +1,57 @@
/**
* 抖音关注:程序员三丙
* 知识星球https://t.zsxq.com/j9b21
*/
package sanbing.jcpp.infrastructure.proto;
import sanbing.jcpp.infrastructure.proto.model.PricingModel;
import sanbing.jcpp.infrastructure.proto.model.PricingModel.FlagPrice;
import sanbing.jcpp.infrastructure.proto.model.PricingModel.Period;
import sanbing.jcpp.proto.gen.ProtocolProto.*;
import java.util.Map;
/**
 * Converts internal pricing-model objects into their Protobuf wire
 * representations.
 *
 * @author baigod
 */
public class ProtoConverter {

    /**
     * Maps a {@link PricingModel} to a {@link PricingModelProto}: type/rule enums
     * are converted by name, standard prices are copied, flag-specific prices go
     * into a map, and time periods are appended in order.
     */
    public static PricingModelProto toPricingModel(PricingModel pricingModel) {
        // 创建 PricingModelProto 实例
        PricingModelProto.Builder builder = PricingModelProto.newBuilder();
        // 设置字段
        builder.setType(PricingModelType.valueOf(pricingModel.getType().name()));
        builder.setRule(PricingModelRule.valueOf(pricingModel.getRule().name()));
        builder.setStandardElec(pricingModel.getStandardElec());
        builder.setStandardServ(pricingModel.getStandardServ());
        // 转换 flagPriceList
        for (Map.Entry<PricingModelFlag, FlagPrice> entry : pricingModel.getFlagPriceList().entrySet()) {
            PricingModelFlag flag = entry.getKey();
            FlagPrice flagPrice = entry.getValue();
            FlagPriceProto flagPriceProto = FlagPriceProto.newBuilder()
                    .setFlag(PricingModelFlag.valueOf(flag.name())) // 枚举转换
                    .setElec(flagPrice.getElec())
                    .setServ(flagPrice.getServ())
                    .build();
            // NOTE(review): map key is the enum ordinal — fragile if enum order
            // ever changes; confirm consumers key by ordinal too.
            builder.putFlagPrice(flag.ordinal(), flagPriceProto); // 按 ordinal 值作为 key 存入
        }
        // 转换 PeriodsList
        for (Period period : pricingModel.getPeriodsList()) {
            PeriodProto periodProto = PeriodProto.newBuilder()
                    .setSn(period.getSn())
                    .setBegin(period.getBegin().toString()) // 假设 begin 是 LocalTime, 转换为字符串
                    .setEnd(period.getEnd().toString()) // 假设 end 是 LocalTime, 转换为字符串
                    .setFlag(PricingModelFlag.valueOf(period.getFlag().name()))
                    .build();
            builder.addPeriod(periodProto);
        }
        return builder.build();
    }
}

Some files were not shown because too many files have changed in this diff Show More