refactor(k01): flatten directory layout and split deployment into five scripts
build-and-push-harbor / docker-build-push (push) Waiting to run

This commit is contained in:
zetaloop
2026-05-06 02:37:25 +08:00
parent 8ba8c7ca20
commit c575b53843
31 changed files with 95 additions and 57 deletions
+10 -21
View File
@@ -2,7 +2,7 @@
该目录是 juwan-backend 所有 k3s 节点的初始化配置。公网入口由 center 的 Caddy 接管——`/wt/*` 走 UDP 直达 chat-api,其余路径反代到 envoy-gateway NodePort 30080。
第一台机器按以下步骤初始化为 k3s server;后续加入的 k02、k03 只运行 `install-k3s.sh agent`,其他步骤(k8s Secret、CR、业务 yaml)在 server 上 apply 一次即可。
第一台机器按以下步骤初始化为 k3s server;后续加入的 k02、k03 只运行 `install.sh agent`,其他步骤在 server 上执行一次即可。
## 前置条件
@@ -12,29 +12,18 @@
- `/root/registry-password` 文件存放 zot admin 密码(`chmod 600`)
- `.env` 已按 `.env.example` 填好(zot admin 密码、Brevo SMTP、Garage S3 凭据)
如果还没 `.env`:先 `cp .env.example .env && nano .env`,再跑 `install-k3s.sh`
如果还没 `.env`:先 `cp .env.example .env && nano .env`,再跑 `secrets.sh`
## k3s server 初始化
```bash
cd /root/juwan-backend/deploy/k01
# 装 k3s + Helm + 四个 Operator,并自动调用 secrets.sh 生成全部 k8s Secret
bash install-k3s.sh
# 应用基础设施层(Operator CR)
kubectl apply -f 01-infra/postgres.yaml
kubectl apply -f 01-infra/redis.yaml
kubectl apply -f 01-infra/mongo.yaml
kubectl apply -f 01-infra/kafka.yaml
kubectl apply -f 01-infra/ratelimit.yaml
kubectl apply -f 01-infra/envoy.yaml
# PostgreSQL Cluster 全部 Ready 后写入 schema 与 fixture
bash 01-infra/load-schema.sh
# 启动业务服务
kubectl apply -f 02-service/
bash install.sh # k3s + Helm + 四个 Operator
bash secrets.sh # 生成所有 k8s Secret
bash apply-infra.sh # 数据层 + envoy + ratelimit,分批等待 Ready
bash apply-schema.sh # 向 CNPG 写入 schema 与 fixture
bash apply-services.sh # 启动业务 Deployment
kubectl -n juwan get pods -w
```
@@ -43,7 +32,7 @@ kubectl -n juwan get pods -w
控制面是 k3s server,跑着 CNPG / Strimzi / Redis / MongoDB 四个 Operator 管理有状态服务。
数据层 11 个 per-domain PostgreSQL Cluster + 12 个 RedisReplication + 1 个 MongoDBCommunity(chat)+ Strimzi KRaft Kafka(1 broker)
数据层 11 个 per-domain PostgreSQL Cluster + 12 个 RedisReplication + 1 个 MongoDBCommunity + Strimzi KRaft Kafka。
业务层 27 个 Go 服务镜像指向 `registry.juwan.xhttp.zip/juwan/<name>:latest`,每个 domain 一套 rpc + api,外加 snowflake、authz-adapter、email-mq 和 frontend。所有 Deployment 带 `imagePullSecrets: registry-creds`,containerd 的 `registries.yaml` 配了 zot admin 凭据。
@@ -55,7 +44,7 @@ chat-api 的 WebTransport 走 UDP 8443 hostPortcenter Caddy 的 PR 7669 fork
`secrets.sh` 生成随机密码写入 `secrets/` 目录,同时 `kubectl create secret` 到 `juwan` namespace。需要手动填的是 `.env` 里的 zot admin 密码、Brevo SMTP key 和 Garage S3 access key。
CNPG 每个 Cluster Ready 后自动生成 `<cluster>-app` Secret(username/password/dbname/host/port),业务 pod 的 env 直接从这些 Secret 取值
CNPG 每个 Cluster Ready 后自动生成 `<cluster>-app` Secret(username/password/dbname/host/port),业务 pod 的 env 由这些 Secret 提供
## 加节点
@@ -71,7 +60,7 @@ cat /var/lib/rancher/k3s/server/node-token
cd /root/juwan-backend/deploy/k01
echo "<zot-admin-password>" > /root/registry-password && chmod 600 /root/registry-password
K3S_URL=https://<server-ip>:6443 K3S_TOKEN=<token> bash install-k3s.sh agent
K3S_URL=https://<server-ip>:6443 K3S_TOKEN=<token> bash install.sh agent
```
## 日常操作
+29
View File
@@ -0,0 +1,29 @@
#!/usr/bin/env bash
# Apply the k01 infrastructure layer in batches (gateway, redis, postgres,
# mongo, kafka), waiting for each batch before starting the next.
set -euo pipefail

# Manifest directory, resolved relative to this script's location.
infra_dir=$(cd "$(dirname "$0")/infra" && pwd)
readonly infra_dir

# Default to the k3s kubeconfig unless the caller already exported one.
export KUBECONFIG="${KUBECONFIG:-/etc/rancher/k3s/k3s.yaml}"

# Apply a single manifest file from the infra directory.
apply_manifest() {
  kubectl apply -f "${infra_dir}/$1"
}

printf '%s\n' 'envoy + ratelimit'
apply_manifest envoy.yaml
apply_manifest ratelimit.yaml
# Waits tolerate a timeout (|| true) so one slow batch never aborts the rest.
kubectl -n juwan wait --for=condition=Ready pod -l app=envoy-gateway --timeout=120s || true
kubectl -n juwan wait --for=condition=Ready pod -l "app in (ratelimit,rl-redis)" --timeout=120s || true

printf '%s\n' 'redis'
apply_manifest redis.yaml
kubectl -n juwan wait --for=jsonpath='{.status.readyReplicas}'=1 redisreplication --all --timeout=600s || true

printf '%s\n' 'postgres'
apply_manifest postgres.yaml
kubectl -n juwan wait --for=condition=Ready cluster --all --timeout=900s || true

printf '%s\n' 'mongo'
apply_manifest mongo.yaml
kubectl -n juwan wait --for=jsonpath='{.status.phase}'=Running mongodbcommunity/chat-mongodb --timeout=600s || true

printf '%s\n' 'kafka'
apply_manifest kafka.yaml
kubectl -n kafka wait --for=condition=Ready kafka/juwan-kafka --timeout=900s || true

# Final overview across all namespaces.
kubectl get pods -A
@@ -8,19 +8,12 @@ FIXTURE_DIR="$REPO_ROOT/deploy/dev/fixture"
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
declare -A SCHEMA_MAP=(
[user-db]=users
[player-db]=player
[game-db]=game
[shop-db]=shop
[order-db]=order
[wallet-db]=wallet
[community-db]=community
[review-db]=review
[dispute-db]=dispute
[notification-db]=notification
[search-db]=search
)
# Map a domain name onto its schema directory; only "user" diverges —
# its SQL files live under "users". Every other domain maps to itself.
domain_dir() {
  local domain=$1
  if [[ "$domain" == "user" ]]; then
    printf '%s\n' 'users'
  else
    printf '%s\n' "$domain"
  fi
}
psql_exec() {
local cluster="$1" sql="$2"
@@ -34,25 +27,27 @@ psql_file() {
-v ON_ERROR_STOP=1 -U app -d app < "$file"
}
for cluster in "${!SCHEMA_MAP[@]}"; do
domain="${SCHEMA_MAP[$cluster]}"
echo ">>> $cluster ($domain)"
clusters=()
while IFS= read -r name; do
clusters+=("$name")
done < <(kubectl -n juwan get cluster -o jsonpath='{.items[*].metadata.name}' | tr ' ' '\n')
for cluster in "${clusters[@]}"; do
domain="${cluster%-db}"
dir="$(domain_dir "$domain")"
echo "$cluster"
kubectl -n juwan wait --for=condition=Ready "cluster.postgresql.cnpg.io/${cluster}" --timeout=300s
psql_file "$cluster" "$SQL_DIR/common/update_updated_at_column.sql"
for f in "$SQL_DIR/$domain"/*.sql; do
for f in "$SQL_DIR/$dir"/*.sql; do
[ -f "$f" ] || continue
echo " schema: $(basename "$f")"
echo " $(basename "$f")"
psql_file "$cluster" "$f"
done
if [ -f "$FIXTURE_DIR/$domain.sql" ]; then
echo " fixture: $domain.sql"
psql_file "$cluster" "$FIXTURE_DIR/$domain.sql"
if [ -f "$FIXTURE_DIR/$dir.sql" ]; then
echo " $dir.sql"
psql_file "$cluster" "$FIXTURE_DIR/$dir.sql"
fi
done
echo
echo "schema + fixture loaded into 11 CNPG clusters"
echo "schema + fixture loaded, ${#clusters[@]} clusters"
+30
View File
@@ -0,0 +1,30 @@
#!/usr/bin/env bash
# Apply the k01 business-service manifests in dependency order:
#   1) shared infrastructure services (snowflake, authz-adapter)
#   2) the per-domain rpc/api services
#   3) services that depend on the domain layer (chat, email, frontend)
set -euo pipefail

# Manifest directory, resolved relative to this script's location.
SVC_DIR="$(cd "$(dirname "$0")/services" && pwd)"
# Default to the k3s kubeconfig unless the caller already exported one.
export KUBECONFIG="${KUBECONFIG:-/etc/rancher/k3s/k3s.yaml}"

# Apply each given manifest (path relative to SVC_DIR), then wait for every
# Deployment in the juwan namespace to become Available. The wait tolerates
# a timeout (|| true) so one slow Deployment never aborts the rollout.
apply_wait() {
  for f in "$@"; do
    echo "${f%.yaml}"
    kubectl apply -f "${SVC_DIR}/${f}"
  done
  kubectl -n juwan wait --for=condition=Available deploy --all --timeout=600s || true
}

cd "$SVC_DIR"

# Batch 1: cluster-wide dependencies every other service needs.
apply_wait snowflake.yaml authz-adapter.yaml

# Batch 2: all per-domain services — every manifest not named in another batch.
domain_files=()
for f in *.yaml; do
  # Guard against an unmatched glob (literal "*.yaml") or stray non-files,
  # same idiom as apply-schema.sh.
  [ -f "$f" ] || continue
  case "$f" in
    snowflake.yaml|authz-adapter.yaml|chat.yaml|email.yaml|frontend.yaml) ;;
    *) domain_files+=("$f") ;;
  esac
done
apply_wait "${domain_files[@]}"

# Batch 3: services that depend on the domain layer.
apply_wait chat.yaml email.yaml frontend.yaml

kubectl get pods -n juwan
@@ -21,13 +21,6 @@ fi
K01_DIR="$(cd "$(dirname "$0")" && pwd)"
if [ "$MODE" = "server" ] && [ ! -f "${K01_DIR}/.env" ]; then
echo ".env not found in ${K01_DIR}" >&2
echo " cp ${K01_DIR}/.env.example ${K01_DIR}/.env" >&2
echo " nano ${K01_DIR}/.env" >&2
exit 1
fi
write_registries() {
mkdir -p /etc/rancher/k3s
cat > /etc/rancher/k3s/registries.yaml <<EOF
@@ -82,7 +75,7 @@ systemctl restart k3s
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
until kubectl get nodes >/dev/null 2>&1; do sleep 2; done
kubectl apply -f "${K01_DIR}/00-base/"
kubectl apply -f "${K01_DIR}/base/"
kubectl apply --server-side --force-conflicts -f \
"https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v${CNPG_VERSION}/cnpg-${CNPG_VERSION}.yaml"
@@ -121,8 +114,6 @@ kubectl -n kafka rollout status deploy/strimzi-cluster-operator --timeout=300s
kubectl -n redis-operator rollout status deploy/redis-operator --timeout=300s
kubectl -n mongodb-operator rollout status deploy/mongodb-kubernetes-operator --timeout=300s
bash "${K01_DIR}/secrets.sh"
echo
echo "k3s server + 4 operators ready"
echo "node token: $(cat /var/lib/rancher/k3s/server/node-token)"
+5 -1
View File
@@ -69,7 +69,11 @@ kubectl -n juwan create secret tls chat-wt-tls \
--key="${DEV_CERTS}/tls.key" \
--dry-run=client -o yaml | kubectl apply -f -
DOMAINS=(user player game shop order wallet community review dispute notification search chat)
# Derive the redis domain list from the manifests instead of hard-coding it:
# every "<domain>-redis" RedisReplication declared in infra/redis.yaml yields
# one DOMAINS entry (the "-redis" suffix is stripped).
DOMAINS=()
while IFS= read -r name; do
  DOMAINS+=("${name%-redis}")
done < <(grep -E '^ name: [a-z-]+-redis$' "$K01_DIR/infra/redis.yaml" | awk '{print $2}')
# An empty list almost certainly means the manifest moved or its indentation
# no longer matches the grep pattern — warn loudly instead of silently
# creating zero redis password secrets.
if [ "${#DOMAINS[@]}" -eq 0 ]; then
  echo "warning: no <domain>-redis entries found in $K01_DIR/infra/redis.yaml" >&2
fi
for d in "${DOMAINS[@]}"; do
pwd_val="$(openssl rand -hex 16)"
write_secret "redis-${d}-password" "$pwd_val"