Compare commits

3 Commits

Author SHA1 Message Date
zetaloop 45ade5a6a0 fix(k01): merge secrets.sh into install-k3s.sh and lower operator resource requests
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 00:52:10 +08:00
zetaloop 4d93678046 fix(k01): lower operator resource requests to fit single-vcpu node
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 00:30:05 +08:00
zetaloop 430cc63eb2 fix(k01): use server-side apply for cnpg and strimzi manifests
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-05 13:35:11 +08:00
2 changed files with 35 additions and 18 deletions
+8 -13
View File
@@ -10,24 +10,19 @@
- center 已部署,`registry.juwan.xhttp.zip` 可推可拉 - center 已部署,`registry.juwan.xhttp.zip` 可推可拉
- 已从 Gitea 拉取仓库:`git clone https://git.juwan.xhttp.zip/juwan/juwan-backend.git` - 已从 Gitea 拉取仓库:`git clone https://git.juwan.xhttp.zip/juwan/juwan-backend.git`
- `/root/registry-password` 文件存放 zot admin 密码(`chmod 600`) - `/root/registry-password` 文件存放 zot admin 密码(`chmod 600`)
- `.env` 已按 `.env.example` 填好(zot admin 密码、Brevo SMTP、Garage S3 凭据)
如果还没 `.env`:先 `cp .env.example .env && nano .env`,再跑 `install-k3s.sh`
## k3s server 初始化 ## k3s server 初始化
```bash ```bash
cd /root/juwan-backend/deploy/k01 cd /root/juwan-backend/deploy/k01
# 装 k3s(禁用内置 traefik)+ Helm + 四个 Operator # 装 k3s + Helm + 四个 Operator,并自动调用 secrets.sh 生成全部 k8s Secret
bash install-k3s.sh bash install-k3s.sh
# 准备 .env,填 zot Admin 密码 / Brevo SMTP / Garage S3 凭据 # 应用基础设施层(Operator CR)
cp .env.example .env
nano .env
# 应用 namespace + RBAC,生成全部 k8s Secret
kubectl apply -f 00-base/
bash secrets.sh
# 应用基础设施(Operator CR)
kubectl apply -f 01-infra/postgres.yaml kubectl apply -f 01-infra/postgres.yaml
kubectl apply -f 01-infra/redis.yaml kubectl apply -f 01-infra/redis.yaml
kubectl apply -f 01-infra/mongo.yaml kubectl apply -f 01-infra/mongo.yaml
@@ -35,7 +30,7 @@ kubectl apply -f 01-infra/kafka.yaml
kubectl apply -f 01-infra/ratelimit.yaml kubectl apply -f 01-infra/ratelimit.yaml
kubectl apply -f 01-infra/envoy.yaml kubectl apply -f 01-infra/envoy.yaml
# PostgreSQL Cluster 全部 Ready 后再灌 schema # PostgreSQL Cluster 全部 Ready 后写入 schema 与 fixture
bash 01-infra/load-schema.sh bash 01-infra/load-schema.sh
# 启动业务服务 # 启动业务服务
@@ -46,7 +41,7 @@ kubectl -n juwan get pods -w
## 做什么 ## 做什么
四层结构。控制面是 k3s server,跑着 CNPG / Strimzi / Redis / MongoDB 四个 Operator 管理有状态服务。 控制面是 k3s server,跑着 CNPG / Strimzi / Redis / MongoDB 四个 Operator 管理有状态服务。
数据层 11 个 per-domain PostgreSQL Cluster + 12 个 RedisReplication + 1 个 MongoDBCommunity(chat)+ Strimzi KRaft Kafka(1 broker)。 数据层 11 个 per-domain PostgreSQL Cluster + 12 个 RedisReplication + 1 个 MongoDBCommunity(chat)+ Strimzi KRaft Kafka(1 broker)。
@@ -54,7 +49,7 @@ kubectl -n juwan get pods -w
email-api 跟 user-rpc 共用 user-redis 实例,因为注册和重置密码的验证码 key 跨服务读写。 email-api 跟 user-rpc 共用 user-redis 实例,因为注册和重置密码的验证码 key 跨服务读写。
chat-api 的 WebTransport 走 UDP 8443 hostPort(center Caddy 的 PR 7669 fork 把这路流量终结后反代过来) chat-api 的 WebTransport 走 UDP 8443 hostPort(center Caddy 的 PR 7669 fork 在中心握手后反向代理 WebTransport 连接到 chat-api)
## 生成的 Secret ## 生成的 Secret
+27 -5
View File
@@ -19,6 +19,15 @@ if [ ! -f /root/registry-password ]; then
exit 1 exit 1
fi fi
K01_DIR="$(cd "$(dirname "$0")" && pwd)"
if [ "$MODE" = "server" ] && [ ! -f "${K01_DIR}/.env" ]; then
echo ".env not found in ${K01_DIR}" >&2
echo " cp ${K01_DIR}/.env.example ${K01_DIR}/.env" >&2
echo " nano ${K01_DIR}/.env" >&2
exit 1
fi
write_registries() { write_registries() {
mkdir -p /etc/rancher/k3s mkdir -p /etc/rancher/k3s
cat > /etc/rancher/k3s/registries.yaml <<EOF cat > /etc/rancher/k3s/registries.yaml <<EOF
@@ -73,15 +82,18 @@ systemctl restart k3s
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
until kubectl get nodes >/dev/null 2>&1; do sleep 2; done until kubectl get nodes >/dev/null 2>&1; do sleep 2; done
K01_DIR="$(cd "$(dirname "$0")" && pwd)"
kubectl apply -f "${K01_DIR}/00-base/" kubectl apply -f "${K01_DIR}/00-base/"
kubectl apply -f \ kubectl apply --server-side --force-conflicts -f \
"https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v${CNPG_VERSION}/cnpg-${CNPG_VERSION}.yaml" "https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v${CNPG_VERSION}/cnpg-${CNPG_VERSION}.yaml"
kubectl -n cnpg-system set resources deploy/cnpg-controller-manager \
--requests=cpu=50m,memory=64Mi --limits=cpu=200m,memory=256Mi
kubectl create namespace kafka 2>/dev/null || true kubectl create namespace kafka 2>/dev/null || true
kubectl apply -n kafka \ kubectl apply --server-side --force-conflicts -n kafka \
-f "https://github.com/strimzi/strimzi-kafka-operator/releases/download/${STRIMZI_VERSION}/strimzi-cluster-operator-${STRIMZI_VERSION}.yaml" -f "https://github.com/strimzi/strimzi-kafka-operator/releases/download/${STRIMZI_VERSION}/strimzi-cluster-operator-${STRIMZI_VERSION}.yaml"
kubectl -n kafka set resources deploy/strimzi-cluster-operator \
--requests=cpu=50m,memory=200Mi --limits=cpu=500m,memory=500Mi
helm repo add ot-helm https://ot-container-kit.github.io/helm-charts/ 2>/dev/null || true helm repo add ot-helm https://ot-container-kit.github.io/helm-charts/ 2>/dev/null || true
helm repo add mongodb https://mongodb.github.io/helm-charts 2>/dev/null || true helm repo add mongodb https://mongodb.github.io/helm-charts 2>/dev/null || true
@@ -89,18 +101,28 @@ helm repo update
helm upgrade --install redis-operator ot-helm/redis-operator \ helm upgrade --install redis-operator ot-helm/redis-operator \
--version "${REDIS_OP_VERSION}" \ --version "${REDIS_OP_VERSION}" \
--namespace redis-operator --create-namespace --namespace redis-operator --create-namespace \
--set resources.requests.cpu=50m \
--set resources.requests.memory=100Mi \
--set resources.limits.cpu=500m \
--set resources.limits.memory=500Mi
helm upgrade --install mongodb-kubernetes mongodb/mongodb-kubernetes \ helm upgrade --install mongodb-kubernetes mongodb/mongodb-kubernetes \
--version "${MONGODB_OP_VERSION}" \ --version "${MONGODB_OP_VERSION}" \
--namespace mongodb-operator --create-namespace \ --namespace mongodb-operator --create-namespace \
--set operator.watchNamespace=juwan --set operator.watchNamespace=juwan \
--set operator.resources.requests.cpu=50m \
--set operator.resources.requests.memory=100Mi \
--set operator.resources.limits.cpu=500m \
--set operator.resources.limits.memory=300Mi
kubectl -n cnpg-system rollout status deploy/cnpg-controller-manager --timeout=300s kubectl -n cnpg-system rollout status deploy/cnpg-controller-manager --timeout=300s
kubectl -n kafka rollout status deploy/strimzi-cluster-operator --timeout=300s kubectl -n kafka rollout status deploy/strimzi-cluster-operator --timeout=300s
kubectl -n redis-operator rollout status deploy/redis-operator --timeout=300s kubectl -n redis-operator rollout status deploy/redis-operator --timeout=300s
kubectl -n mongodb-operator rollout status deploy/mongodb-kubernetes-operator --timeout=300s kubectl -n mongodb-operator rollout status deploy/mongodb-kubernetes-operator --timeout=300s
bash "${K01_DIR}/secrets.sh"
echo echo
echo "k3s server + 4 operators ready" echo "k3s server + 4 operators ready"
echo "node token: $(cat /var/lib/rancher/k3s/server/node-token)" echo "node token: $(cat /var/lib/rancher/k3s/server/node-token)"