Compare commits

26 Commits

Author SHA1 Message Date
zetaloop d6c59b59d4 fix(k01): drop additionalRedpandaCmdFlags to avoid conflict with chart-generated flags
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 12:47:25 +08:00
zetaloop fe744ae6c4 fix(k01): enable redpanda developer_mode to skip 1GB memory minimum check
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 12:22:45 +08:00
zetaloop e43d2467da fix(k01): drop chartRef.chartVersion from Redpanda CR
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 12:13:55 +08:00
zetaloop a25086dcdf fix(k01): relax redpanda operator probes and raise memory limits
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 12:05:16 +08:00
zetaloop 6fc320656b feat(k01): replace strimzi kafka with redpanda
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 11:39:47 +08:00
zetaloop 1deb5dbdb2 fix(k01): resource requests based on actual usage
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 10:24:59 +08:00
zetaloop 6341d746da fix: fuck mongo
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 09:48:02 +08:00
zetaloop 50f079d86c fix(k01): replicate operator shell script and wrap exec with setarch for mongod
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 09:06:04 +08:00
zetaloop 01f8bc1729 fix(k01): wrap mongod entrypoint with setarch uname-2.6 to bypass kernel 6.19+ guard
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 08:32:46 +08:00
zetaloop ed3f80ca73 fix(k01): force glibc pthread rseq for mongo to bypass tcmalloc crash on kernel 6.19+
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 07:41:58 +08:00
zetaloop 4d4a16ba1b feat(k01): bump mongo redis images and workaround mongo tcmalloc segfault on newer kernels
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 07:18:24 +08:00
zetaloop 92822e9da8 fix(k01): rewrite strimzi namespace via sed before applying manifest 2026-05-06 07:17:40 +08:00
zetaloop 4a8e04d444 fix(k01): shrink postgres redis kafka memory requests
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 05:22:32 +08:00
zetaloop b9ff1f043d fix(k01): correct redis wait condition and serialize ratelimit on rl-redis
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 04:49:37 +08:00
zetaloop a3174d16d0 fix(k01): scope teardown cleanup to business resources only
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 04:38:55 +08:00
zetaloop 4ee866da95 feat(k01): add teardown script for clean reset of data and service layers
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 04:32:36 +08:00
zetaloop 513d0dbac2 fix(k01): lower cpu requests so all infra and services fit on 1 vcpu node 2026-05-06 04:32:25 +08:00
zetaloop bc8c5ad152 fix(k01): avoid set -e exit on arithmetic post-increment in apply-infra
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 03:52:41 +08:00
zetaloop 8dac0b8d76 fix(k01): apply infra cr documents one by one to avoid scheduler storm
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 03:44:54 +08:00
zetaloop da43d9b8f7 fix(k01): set production-grade memory limits across all workloads
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 03:24:31 +08:00
zetaloop c575b53843 refactor(k01): flatten directory layout and split deployment into five scripts
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 02:37:25 +08:00
zetaloop 8ba8c7ca20 fix(k01): bump kafka crd apiversion to v1 for strimzi 1.0.0
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 01:28:07 +08:00
zetaloop 95f3608b4b fix(k01): quote env value with spaces for shell source compatibility
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 01:01:24 +08:00
zetaloop 45ade5a6a0 fix(k01): merge secrets.sh into install-k3s.sh and lower operator resource requests
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 00:52:10 +08:00
zetaloop 4d93678046 fix(k01): lower operator resource requests to fit single-vcpu node
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-06 00:30:05 +08:00
zetaloop 430cc63eb2 fix(k01): use server-side apply for cnpg and strimzi manifests
build-and-push-harbor / docker-build-push (push) Waiting to run
2026-05-05 13:35:11 +08:00
34 changed files with 520 additions and 217 deletions
+1 -1
View File
@@ -12,7 +12,7 @@ EMAIL_SMTP_PORT=587
EMAIL_SMTP_USERNAME= EMAIL_SMTP_USERNAME=
EMAIL_SMTP_PASSWORD= EMAIL_SMTP_PASSWORD=
EMAIL_FROM_ADDRESS=dev@juwan.xhttp.zip EMAIL_FROM_ADDRESS=dev@juwan.xhttp.zip
EMAIL_FROM_NAME=Juwan Team EMAIL_FROM_NAME="Juwan Team"
EMAIL_REPLY_TO= EMAIL_REPLY_TO=
S3_ENDPOINT=https://s3.juwan.xhttp.zip S3_ENDPOINT=https://s3.juwan.xhttp.zip
-72
View File
@@ -1,72 +0,0 @@
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaNodePool
metadata:
name: controller
namespace: kafka
labels:
strimzi.io/cluster: juwan-kafka
spec:
replicas: 1
roles:
- controller
storage:
type: persistent-claim
size: 1Gi
deleteClaim: false
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaNodePool
metadata:
name: broker
namespace: kafka
labels:
strimzi.io/cluster: juwan-kafka
spec:
replicas: 1
roles:
- broker
storage:
type: persistent-claim
size: 5Gi
deleteClaim: false
---
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
name: juwan-kafka
namespace: kafka
annotations:
strimzi.io/node-pools: enabled
strimzi.io/kraft: enabled
spec:
kafka:
version: 4.2.0
metadataVersion: 4.2-IV0
listeners:
- name: plain
port: 9092
type: internal
tls: false
config:
offsets.topic.replication.factor: 1
transaction.state.log.replication.factor: 1
transaction.state.log.min.isr: 1
default.replication.factor: 1
min.insync.replicas: 1
entityOperator:
topicOperator: {}
userOperator: {}
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: email-task
namespace: kafka
labels:
strimzi.io/cluster: juwan-kafka
spec:
partitions: 1
replicas: 1
+15 -31
View File
@@ -2,7 +2,7 @@
该目录是 juwan-backend 所有 k3s 节点的初始化配置。公网入口由 center 的 Caddy 接管——`/wt/*` 走 UDP 直达 chat-api,其余路径反代到 envoy-gateway NodePort 30080。 该目录是 juwan-backend 所有 k3s 节点的初始化配置。公网入口由 center 的 Caddy 接管——`/wt/*` 走 UDP 直达 chat-api,其余路径反代到 envoy-gateway NodePort 30080。
第一台机器按以下步骤初始化为 k3s server;后续加入的 k02、k03 只运行 `install-k3s.sh agent`,其他步骤k8s Secret、CR、业务 yaml在 server 上 apply 一次即可。 第一台机器按以下步骤初始化为 k3s server;后续加入的 k02、k03 只运行 `install.sh agent`,其他步骤在 server 上执行一次即可。
## 前置条件 ## 前置条件
@@ -10,57 +10,41 @@
- center 已部署,`registry.juwan.xhttp.zip` 可推可拉 - center 已部署,`registry.juwan.xhttp.zip` 可推可拉
- 已从 Gitea 拉取仓库:`git clone https://git.juwan.xhttp.zip/juwan/juwan-backend.git` - 已从 Gitea 拉取仓库:`git clone https://git.juwan.xhttp.zip/juwan/juwan-backend.git`
- `/root/registry-password` 文件存放 zot admin 密码(`chmod 600` - `/root/registry-password` 文件存放 zot admin 密码(`chmod 600`
- `.env` 已按 `.env.example` 填好(zot admin 密码、Brevo SMTP、Garage S3 凭据)
如果还没 `.env`:先 `cp .env.example .env && nano .env`,再跑 `secrets.sh`
## k3s server 初始化 ## k3s server 初始化
```bash ```bash
cd /root/juwan-backend/deploy/k01 cd /root/juwan-backend/deploy/k01
# k3s(禁用内置 traefik+ Helm + 四个 Operator bash install.sh # k3s + Helm + 四个 Operator
bash install-k3s.sh bash secrets.sh # 生成所有 k8s Secret
bash apply-infra.sh # 数据层 + envoy + ratelimit,分批等待 Ready
bash apply-schema.sh # 向 CNPG 写入 schema 与 fixture
bash apply-services.sh # 启动业务 Deployment
# 准备 .env,填 zot Admin 密码 / Brevo SMTP / Garage S3 凭据 # 可以用 `bash teardown.sh` 来卸载数据层和业务层
cp .env.example .env
nano .env
# 应用 namespace + RBAC,生成全部 k8s Secret
kubectl apply -f 00-base/
bash secrets.sh
# 应用基础设施(Operator CR
kubectl apply -f 01-infra/postgres.yaml
kubectl apply -f 01-infra/redis.yaml
kubectl apply -f 01-infra/mongo.yaml
kubectl apply -f 01-infra/kafka.yaml
kubectl apply -f 01-infra/ratelimit.yaml
kubectl apply -f 01-infra/envoy.yaml
# 等 PostgreSQL Cluster 全部 Ready 后再灌 schema
bash 01-infra/load-schema.sh
# 启动业务服务
kubectl apply -f 02-service/
kubectl -n juwan get pods -w
``` ```
## 做什么 ## 做什么
四层结构。控制面是 k3s server,跑着 CNPG / Strimzi / Redis / MongoDB 四个 Operator 管理有状态服务。 控制面是 k3s server,跑着 CNPG / Strimzi / Redis / MongoDB 四个 Operator 管理有状态服务。
数据层 11 个 per-domain PostgreSQL Cluster + 12 个 RedisReplication + 1 个 MongoDBCommunitychat+ Strimzi KRaft Kafka1 broker 数据层 11 个 per-domain PostgreSQL Cluster + 12 个 RedisReplication + 1 个 MongoDBCommunity + Strimzi KRaft Kafka。
业务层 27 个 Go 服务镜像指向 `registry.juwan.xhttp.zip/juwan/<name>:latest`,每个 domain 一套 rpc + api,外加 snowflake、authz-adapter、email-mq 和 frontend。所有 Deployment 带 `imagePullSecrets: registry-creds`containerd 的 `registries.yaml` 配了 zot admin 凭据。 业务层 27 个 Go 服务镜像指向 `registry.juwan.xhttp.zip/juwan/<name>:latest`,每个 domain 一套 rpc + api,外加 snowflake、authz-adapter、email-mq 和 frontend。所有 Deployment 带 `imagePullSecrets: registry-creds`containerd 的 `registries.yaml` 配了 zot admin 凭据。
email-api 跟 user-rpc 共用 user-redis 实例,因为注册和重置密码的验证码 key 跨服务读写。 email-api 跟 user-rpc 共用 user-redis 实例,因为注册和重置密码的验证码 key 跨服务读写。
chat-api 的 WebTransport 走 UDP 8443 hostPortcenter Caddy 的 PR 7669 fork 把这路流量终结后反代过来 chat-api 的 WebTransport 走 UDP 8443 hostPortcenter Caddy 的 PR 7669 fork 在中心握手后反向代理 WebTransport 连接到 chat-api
## 生成的 Secret ## 生成的 Secret
`secrets.sh` 生成随机密码写入 `secrets/` 目录,同时 `kubectl create secret``juwan` namespace。需要手动填的是 `.env` 里的 zot admin 密码、Brevo SMTP key 和 Garage S3 access key。 `secrets.sh` 生成随机密码写入 `secrets/` 目录,同时 `kubectl create secret``juwan` namespace。需要手动填的是 `.env` 里的 zot admin 密码、Brevo SMTP key 和 Garage S3 access key。
CNPG 每个 Cluster Ready 后自动生成 `<cluster>-app` Secretusername/password/dbname/host/port),业务 pod 的 env 直接从这些 Secret 取值 CNPG 每个 Cluster Ready 后自动生成 `<cluster>-app` Secretusername/password/dbname/host/port),业务 pod 的 env 这些 Secret 提供
## 加节点 ## 加节点
@@ -76,7 +60,7 @@ cat /var/lib/rancher/k3s/server/node-token
cd /root/juwan-backend/deploy/k01 cd /root/juwan-backend/deploy/k01
echo "<zot-admin-password>" > /root/registry-password && chmod 600 /root/registry-password echo "<zot-admin-password>" > /root/registry-password && chmod 600 /root/registry-password
K3S_URL=https://<server-ip>:6443 K3S_TOKEN=<token> bash install-k3s.sh agent K3S_URL=https://<server-ip>:6443 K3S_TOKEN=<token> bash install.sh agent
``` ```
## 日常操作 ## 日常操作
+52
View File
@@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Apply the k01 infrastructure layer (envoy, ratelimit, redis, postgres,
# mongo, kafka) in dependency order, waiting between batches so the node
# is not hit by a scheduler storm.
set -euo pipefail

INFRA_DIR="$(cd "$(dirname "$0")/infra" && pwd)"
export KUBECONFIG="${KUBECONFIG:-/etc/rancher/k3s/k3s.yaml}"

# apply_docs FILE KIND WAIT_EXPR
#   Split a multi-document YAML file on '---' separators and apply each
#   document one at a time (avoids scheduling every CR simultaneously).
#   If KIND is non-empty, afterwards wait (best effort) for WAIT_EXPR on
#   all resources of that kind in the juwan namespace.
#   Empty documents (leading/trailing/duplicate '---') are skipped:
#   piping an empty string to `kubectl apply -f -` errors out and would
#   abort the whole script under `set -e`.
apply_docs() {
  local file="$1" kind="$2" wait_expr="$3" buf i
  i=0
  buf=""
  # `|| [[ -n "$line" ]]` keeps a final line that lacks a trailing newline.
  while IFS= read -r line || [[ -n "$line" ]]; do
    if [[ "$line" == "---" ]]; then
      if [[ -n "$buf" ]]; then
        printf '%s\n' "$buf" | kubectl apply -f -
        # $((i+1)) instead of ((i++)): the latter returns 1 when i==0
        # and would trip `set -e`.
        i=$((i+1))
        echo "  ($i) applied"
      fi
      buf=""
    else
      [[ -n "$buf" ]] && buf+=$'\n'
      buf+="$line"
    fi
  done < "$file"
  # Flush the last document (the file need not end with '---').
  if [[ -n "$buf" ]]; then
    printf '%s\n' "$buf" | kubectl apply -f -
    i=$((i+1))
    echo "  ($i) applied"
  fi
  if [[ -n "$kind" ]]; then
    # Best effort: a wait timeout must not abort the remaining batches.
    kubectl -n juwan wait --for="$wait_expr" --timeout=900s "$kind" --all || true
  fi
}

echo "envoy + ratelimit"
kubectl apply -f "${INFRA_DIR}/envoy.yaml"
kubectl apply -f "${INFRA_DIR}/ratelimit.yaml"
kubectl -n juwan wait --for=condition=Ready pod -l app=envoy-gateway --timeout=120s || true
kubectl -n juwan wait --for=condition=Ready pod -l "app in (ratelimit,rl-redis)" --timeout=120s || true

echo "redis"
apply_docs "${INFRA_DIR}/redis.yaml" "" ""
kubectl -n juwan wait --for=condition=Ready pod -l redis_setup_type=replication --timeout=600s || true

echo "postgres"
apply_docs "${INFRA_DIR}/postgres.yaml" cluster.postgresql.cnpg.io "condition=Ready"

echo "mongo"
kubectl apply -f "${INFRA_DIR}/mongo.yaml"
kubectl -n juwan wait --for=jsonpath='{.status.phase}'=Running mongodbcommunity/chat-mongodb --timeout=600s || true

echo "kafka"
kubectl apply -f "${INFRA_DIR}/kafka.yaml"
kubectl -n redpanda wait --for=condition=Ready redpanda/juwan-kafka --timeout=900s || true

kubectl get pods -A
@@ -8,19 +8,12 @@ FIXTURE_DIR="$REPO_ROOT/deploy/dev/fixture"
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
declare -A SCHEMA_MAP=( domain_dir() {
[user-db]=users case "$1" in
[player-db]=player user) echo users ;;
[game-db]=game *) echo "$1" ;;
[shop-db]=shop esac
[order-db]=order }
[wallet-db]=wallet
[community-db]=community
[review-db]=review
[dispute-db]=dispute
[notification-db]=notification
[search-db]=search
)
psql_exec() { psql_exec() {
local cluster="$1" sql="$2" local cluster="$1" sql="$2"
@@ -34,25 +27,27 @@ psql_file() {
-v ON_ERROR_STOP=1 -U app -d app < "$file" -v ON_ERROR_STOP=1 -U app -d app < "$file"
} }
for cluster in "${!SCHEMA_MAP[@]}"; do clusters=()
domain="${SCHEMA_MAP[$cluster]}" while IFS= read -r name; do
echo ">>> $cluster ($domain)" clusters+=("$name")
done < <(kubectl -n juwan get cluster -o jsonpath='{.items[*].metadata.name}' | tr ' ' '\n')
for cluster in "${clusters[@]}"; do
domain="${cluster%-db}"
dir="$(domain_dir "$domain")"
echo "$cluster"
kubectl -n juwan wait --for=condition=Ready "cluster.postgresql.cnpg.io/${cluster}" --timeout=300s kubectl -n juwan wait --for=condition=Ready "cluster.postgresql.cnpg.io/${cluster}" --timeout=300s
psql_file "$cluster" "$SQL_DIR/common/update_updated_at_column.sql" psql_file "$cluster" "$SQL_DIR/common/update_updated_at_column.sql"
for f in "$SQL_DIR/$dir"/*.sql; do
for f in "$SQL_DIR/$domain"/*.sql; do
[ -f "$f" ] || continue [ -f "$f" ] || continue
echo " schema: $(basename "$f")" echo " $(basename "$f")"
psql_file "$cluster" "$f" psql_file "$cluster" "$f"
done done
if [ -f "$FIXTURE_DIR/$dir.sql" ]; then
if [ -f "$FIXTURE_DIR/$domain.sql" ]; then echo " $dir.sql"
echo " fixture: $domain.sql" psql_file "$cluster" "$FIXTURE_DIR/$dir.sql"
psql_file "$cluster" "$FIXTURE_DIR/$domain.sql"
fi fi
done done
echo echo
echo "schema + fixture loaded into 11 CNPG clusters" echo "schema + fixture loaded, ${#clusters[@]} clusters"
+30
View File
@@ -0,0 +1,30 @@
#!/usr/bin/env bash
# Apply the business-service Deployments in three waves:
#   1. shared infrastructure services (snowflake, authz-adapter)
#   2. per-domain rpc/api pairs (everything else in services/)
#   3. services with cross-domain dependencies (chat, email, frontend)
set -euo pipefail

SVC_DIR="$(cd "$(dirname "$0")/services" && pwd)"
export KUBECONFIG="${KUBECONFIG:-/etc/rancher/k3s/k3s.yaml}"

# apply_wait FILE...
#   Apply each manifest, then wait (best effort) for all Deployments in
#   the juwan namespace to become Available before the next wave starts.
apply_wait() {
  local f
  for f in "$@"; do
    echo "${f%.yaml}"
    kubectl apply -f "${SVC_DIR}/${f}"
  done
  kubectl -n juwan wait --for=condition=Available deploy --all --timeout=600s || true
}

cd "$SVC_DIR"

apply_wait snowflake.yaml authz-adapter.yaml

# Everything except the fixed first and last waves is the domain wave.
# nullglob: an empty services dir must not leave a literal '*.yaml'
# that kubectl would then fail to open.
shopt -s nullglob
domain_files=()
for f in *.yaml; do
  case "$f" in
    snowflake.yaml|authz-adapter.yaml|chat.yaml|email.yaml|frontend.yaml) ;;
    *) domain_files+=("$f") ;;
  esac
done

# Guard the expansion: "${domain_files[@]}" on an empty array errors
# under `set -u` in bash < 4.4.
if (( ${#domain_files[@]} > 0 )); then
  apply_wait "${domain_files[@]}"
fi

apply_wait chat.yaml email.yaml frontend.yaml

kubectl get pods -n juwan
@@ -1058,8 +1058,10 @@ spec:
periodSeconds: 10 periodSeconds: 10
resources: resources:
requests: requests:
cpu: 100m cpu: 30m
memory: 128Mi memory: 20Mi
limits:
memory: 120Mi
volumes: volumes:
- name: config - name: config
configMap: configMap:
+77
View File
@@ -0,0 +1,77 @@
apiVersion: cluster.redpanda.com/v1alpha2
kind: Redpanda
metadata:
name: juwan-kafka
namespace: redpanda
spec:
clusterSpec:
image:
tag: v26.1.6
fullnameOverride: juwan-kafka
console:
enabled: false
external:
enabled: false
service:
enabled: false
auth:
sasl:
enabled: false
tls:
enabled: false
listeners:
kafka:
port: 9092
authenticationMethod: null
tls:
enabled: false
admin:
tls:
enabled: false
rpc:
tls:
enabled: false
http:
enabled: false
schemaRegistry:
enabled: false
storage:
persistentVolume:
enabled: true
size: 5Gi
storageClass: local-path
resources:
requests:
cpu: 50m
memory: 200Mi
limits:
cpu: 500m
memory: 500Mi
config:
node:
developer_mode: true
statefulset:
replicas: 1
podTemplate:
spec:
affinity:
podAntiAffinity: null
tuning:
tune_aio_events: false
logging:
logLevel: info
usageStats:
enabled: false
---
apiVersion: cluster.redpanda.com/v1alpha2
kind: Topic
metadata:
name: email-task
namespace: redpanda
spec:
partitions: 1
replicationFactor: 1
cluster:
clusterRef:
name: juwan-kafka
@@ -6,7 +6,7 @@ metadata:
spec: spec:
members: 1 members: 1
type: ReplicaSet type: ReplicaSet
version: "8.2.6" version: "7.0.32"
security: security:
authentication: authentication:
modes: modes:
@@ -24,6 +24,23 @@ spec:
storage.wiredTiger.engineConfig.journalCompressor: zlib storage.wiredTiger.engineConfig.journalCompressor: zlib
statefulSet: statefulSet:
spec: spec:
template:
spec:
containers:
- name: mongod
resources:
requests:
cpu: 30m
memory: 80Mi
limits:
memory: 400Mi
- name: mongodb-agent
resources:
requests:
cpu: 20m
memory: 35Mi
limits:
memory: 100Mi
volumeClaimTemplates: volumeClaimTemplates:
- metadata: - metadata:
name: data-volume name: data-volume
@@ -13,6 +13,12 @@ spec:
owner: app owner: app
storage: storage:
size: 1Gi size: 1Gi
resources:
requests:
cpu: 30m
memory: 50Mi
limits:
memory: 200Mi
--- ---
apiVersion: postgresql.cnpg.io/v1 apiVersion: postgresql.cnpg.io/v1
@@ -30,6 +36,12 @@ spec:
owner: app owner: app
storage: storage:
size: 1Gi size: 1Gi
resources:
requests:
cpu: 30m
memory: 50Mi
limits:
memory: 200Mi
--- ---
apiVersion: postgresql.cnpg.io/v1 apiVersion: postgresql.cnpg.io/v1
@@ -47,6 +59,12 @@ spec:
owner: app owner: app
storage: storage:
size: 1Gi size: 1Gi
resources:
requests:
cpu: 30m
memory: 50Mi
limits:
memory: 200Mi
--- ---
apiVersion: postgresql.cnpg.io/v1 apiVersion: postgresql.cnpg.io/v1
@@ -64,6 +82,12 @@ spec:
owner: app owner: app
storage: storage:
size: 1Gi size: 1Gi
resources:
requests:
cpu: 30m
memory: 50Mi
limits:
memory: 200Mi
--- ---
apiVersion: postgresql.cnpg.io/v1 apiVersion: postgresql.cnpg.io/v1
@@ -81,6 +105,12 @@ spec:
owner: app owner: app
storage: storage:
size: 1Gi size: 1Gi
resources:
requests:
cpu: 30m
memory: 50Mi
limits:
memory: 200Mi
--- ---
apiVersion: postgresql.cnpg.io/v1 apiVersion: postgresql.cnpg.io/v1
@@ -98,6 +128,12 @@ spec:
owner: app owner: app
storage: storage:
size: 1Gi size: 1Gi
resources:
requests:
cpu: 30m
memory: 50Mi
limits:
memory: 200Mi
--- ---
apiVersion: postgresql.cnpg.io/v1 apiVersion: postgresql.cnpg.io/v1
@@ -115,6 +151,12 @@ spec:
owner: app owner: app
storage: storage:
size: 1Gi size: 1Gi
resources:
requests:
cpu: 30m
memory: 50Mi
limits:
memory: 200Mi
--- ---
apiVersion: postgresql.cnpg.io/v1 apiVersion: postgresql.cnpg.io/v1
@@ -132,6 +174,12 @@ spec:
owner: app owner: app
storage: storage:
size: 1Gi size: 1Gi
resources:
requests:
cpu: 30m
memory: 50Mi
limits:
memory: 200Mi
--- ---
apiVersion: postgresql.cnpg.io/v1 apiVersion: postgresql.cnpg.io/v1
@@ -149,6 +197,12 @@ spec:
owner: app owner: app
storage: storage:
size: 1Gi size: 1Gi
resources:
requests:
cpu: 30m
memory: 50Mi
limits:
memory: 200Mi
--- ---
apiVersion: postgresql.cnpg.io/v1 apiVersion: postgresql.cnpg.io/v1
@@ -166,6 +220,12 @@ spec:
owner: app owner: app
storage: storage:
size: 1Gi size: 1Gi
resources:
requests:
cpu: 30m
memory: 50Mi
limits:
memory: 200Mi
--- ---
apiVersion: postgresql.cnpg.io/v1 apiVersion: postgresql.cnpg.io/v1
@@ -183,3 +243,9 @@ spec:
owner: app owner: app
storage: storage:
size: 1Gi size: 1Gi
resources:
requests:
cpu: 30m
memory: 50Mi
limits:
memory: 200Mi
@@ -56,13 +56,15 @@ spec:
spec: spec:
containers: containers:
- name: redis - name: redis
image: redis:8.6.2-alpine image: redis:8.6.3-alpine
ports: ports:
- containerPort: 6379 - containerPort: 6379
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 10Mi
limits:
memory: 60Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -95,6 +97,10 @@ spec:
labels: labels:
app: ratelimit app: ratelimit
spec: spec:
initContainers:
- name: wait-rl-redis
image: busybox:1.37
command: ["sh", "-c", "until nc -z rl-redis-svc 6379; do sleep 1; done"]
containers: containers:
- name: ratelimit - name: ratelimit
image: envoyproxy/ratelimit:fe26676d image: envoyproxy/ratelimit:fe26676d
@@ -124,8 +130,10 @@ spec:
mountPath: /data/ratelimit/config mountPath: /data/ratelimit/config
resources: resources:
requests: requests:
cpu: 50m cpu: 10m
memory: 64Mi memory: 10Mi
limits:
memory: 60Mi
volumes: volumes:
- name: config - name: config
configMap: configMap:
@@ -6,12 +6,14 @@ metadata:
spec: spec:
clusterSize: 1 clusterSize: 1
kubernetesConfig: kubernetesConfig:
image: quay.io/opstree/redis:v7.0.15 image: quay.io/opstree/redis:v8.6.2
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: resources:
requests: requests:
cpu: 50m cpu: 5m
memory: 64Mi memory: 10Mi
limits:
memory: 80Mi
redisSecret: redisSecret:
name: user-redis name: user-redis
key: password key: password
@@ -35,12 +37,14 @@ metadata:
spec: spec:
clusterSize: 1 clusterSize: 1
kubernetesConfig: kubernetesConfig:
image: quay.io/opstree/redis:v7.0.15 image: quay.io/opstree/redis:v8.6.2
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: resources:
requests: requests:
cpu: 50m cpu: 5m
memory: 64Mi memory: 10Mi
limits:
memory: 80Mi
redisSecret: redisSecret:
name: player-redis name: player-redis
key: password key: password
@@ -64,12 +68,14 @@ metadata:
spec: spec:
clusterSize: 1 clusterSize: 1
kubernetesConfig: kubernetesConfig:
image: quay.io/opstree/redis:v7.0.15 image: quay.io/opstree/redis:v8.6.2
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: resources:
requests: requests:
cpu: 50m cpu: 5m
memory: 64Mi memory: 10Mi
limits:
memory: 80Mi
redisSecret: redisSecret:
name: game-redis name: game-redis
key: password key: password
@@ -93,12 +99,14 @@ metadata:
spec: spec:
clusterSize: 1 clusterSize: 1
kubernetesConfig: kubernetesConfig:
image: quay.io/opstree/redis:v7.0.15 image: quay.io/opstree/redis:v8.6.2
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: resources:
requests: requests:
cpu: 50m cpu: 5m
memory: 64Mi memory: 10Mi
limits:
memory: 80Mi
redisSecret: redisSecret:
name: shop-redis name: shop-redis
key: password key: password
@@ -122,12 +130,14 @@ metadata:
spec: spec:
clusterSize: 1 clusterSize: 1
kubernetesConfig: kubernetesConfig:
image: quay.io/opstree/redis:v7.0.15 image: quay.io/opstree/redis:v8.6.2
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: resources:
requests: requests:
cpu: 50m cpu: 5m
memory: 64Mi memory: 10Mi
limits:
memory: 80Mi
redisSecret: redisSecret:
name: order-redis name: order-redis
key: password key: password
@@ -151,12 +161,14 @@ metadata:
spec: spec:
clusterSize: 1 clusterSize: 1
kubernetesConfig: kubernetesConfig:
image: quay.io/opstree/redis:v7.0.15 image: quay.io/opstree/redis:v8.6.2
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: resources:
requests: requests:
cpu: 50m cpu: 5m
memory: 64Mi memory: 10Mi
limits:
memory: 80Mi
redisSecret: redisSecret:
name: wallet-redis name: wallet-redis
key: password key: password
@@ -180,12 +192,14 @@ metadata:
spec: spec:
clusterSize: 1 clusterSize: 1
kubernetesConfig: kubernetesConfig:
image: quay.io/opstree/redis:v7.0.15 image: quay.io/opstree/redis:v8.6.2
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: resources:
requests: requests:
cpu: 50m cpu: 5m
memory: 64Mi memory: 10Mi
limits:
memory: 80Mi
redisSecret: redisSecret:
name: community-redis name: community-redis
key: password key: password
@@ -209,12 +223,14 @@ metadata:
spec: spec:
clusterSize: 1 clusterSize: 1
kubernetesConfig: kubernetesConfig:
image: quay.io/opstree/redis:v7.0.15 image: quay.io/opstree/redis:v8.6.2
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: resources:
requests: requests:
cpu: 50m cpu: 5m
memory: 64Mi memory: 10Mi
limits:
memory: 80Mi
redisSecret: redisSecret:
name: review-redis name: review-redis
key: password key: password
@@ -238,12 +254,14 @@ metadata:
spec: spec:
clusterSize: 1 clusterSize: 1
kubernetesConfig: kubernetesConfig:
image: quay.io/opstree/redis:v7.0.15 image: quay.io/opstree/redis:v8.6.2
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: resources:
requests: requests:
cpu: 50m cpu: 5m
memory: 64Mi memory: 10Mi
limits:
memory: 80Mi
redisSecret: redisSecret:
name: dispute-redis name: dispute-redis
key: password key: password
@@ -267,12 +285,14 @@ metadata:
spec: spec:
clusterSize: 1 clusterSize: 1
kubernetesConfig: kubernetesConfig:
image: quay.io/opstree/redis:v7.0.15 image: quay.io/opstree/redis:v8.6.2
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: resources:
requests: requests:
cpu: 50m cpu: 5m
memory: 64Mi memory: 10Mi
limits:
memory: 80Mi
redisSecret: redisSecret:
name: notification-redis name: notification-redis
key: password key: password
@@ -296,12 +316,14 @@ metadata:
spec: spec:
clusterSize: 1 clusterSize: 1
kubernetesConfig: kubernetesConfig:
image: quay.io/opstree/redis:v7.0.15 image: quay.io/opstree/redis:v8.6.2
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: resources:
requests: requests:
cpu: 50m cpu: 5m
memory: 64Mi memory: 10Mi
limits:
memory: 80Mi
redisSecret: redisSecret:
name: search-redis name: search-redis
key: password key: password
@@ -325,12 +347,14 @@ metadata:
spec: spec:
clusterSize: 1 clusterSize: 1
kubernetesConfig: kubernetesConfig:
image: quay.io/opstree/redis:v7.0.15 image: quay.io/opstree/redis:v8.6.2
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: resources:
requests: requests:
cpu: 50m cpu: 5m
memory: 64Mi memory: 10Mi
limits:
memory: 80Mi
redisSecret: redisSecret:
name: chat-redis name: chat-redis
key: password key: password
@@ -3,7 +3,7 @@ set -euo pipefail
REGISTRY_HOST="registry.juwan.xhttp.zip" REGISTRY_HOST="registry.juwan.xhttp.zip"
CNPG_VERSION="1.29.0" CNPG_VERSION="1.29.0"
STRIMZI_VERSION="1.0.0" REDPANDA_OP_VERSION="v26.1.3"
REDIS_OP_VERSION="0.24.0" REDIS_OP_VERSION="0.24.0"
MONGODB_OP_VERSION="1.8.0" MONGODB_OP_VERSION="1.8.0"
@@ -19,6 +19,8 @@ if [ ! -f /root/registry-password ]; then
exit 1 exit 1
fi fi
K01_DIR="$(cd "$(dirname "$0")" && pwd)"
write_registries() { write_registries() {
mkdir -p /etc/rancher/k3s mkdir -p /etc/rancher/k3s
cat > /etc/rancher/k3s/registries.yaml <<EOF cat > /etc/rancher/k3s/registries.yaml <<EOF
@@ -73,31 +75,50 @@ systemctl restart k3s
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
until kubectl get nodes >/dev/null 2>&1; do sleep 2; done until kubectl get nodes >/dev/null 2>&1; do sleep 2; done
K01_DIR="$(cd "$(dirname "$0")" && pwd)" kubectl apply -f "${K01_DIR}/base/"
kubectl apply -f "${K01_DIR}/00-base/"
kubectl apply -f \ kubectl apply --server-side --force-conflicts -f \
"https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v${CNPG_VERSION}/cnpg-${CNPG_VERSION}.yaml" "https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v${CNPG_VERSION}/cnpg-${CNPG_VERSION}.yaml"
kubectl -n cnpg-system set resources deploy/cnpg-controller-manager \
--requests=cpu=30m,memory=40Mi --limits=cpu=200m,memory=200Mi
kubectl create namespace kafka 2>/dev/null || true kubectl create namespace redpanda 2>/dev/null || true
kubectl apply -n kafka \
-f "https://github.com/strimzi/strimzi-kafka-operator/releases/download/${STRIMZI_VERSION}/strimzi-cluster-operator-${STRIMZI_VERSION}.yaml"
helm repo add ot-helm https://ot-container-kit.github.io/helm-charts/ 2>/dev/null || true helm repo add ot-helm https://ot-container-kit.github.io/helm-charts/ 2>/dev/null || true
helm repo add mongodb https://mongodb.github.io/helm-charts 2>/dev/null || true helm repo add mongodb https://mongodb.github.io/helm-charts 2>/dev/null || true
helm repo add redpanda https://charts.redpanda.com 2>/dev/null || true
helm repo update helm repo update
helm upgrade --install redpanda-controller redpanda/operator \
--version "${REDPANDA_OP_VERSION}" \
--namespace redpanda \
--set crds.enabled=true \
--set resources.requests.cpu=30m \
--set resources.requests.memory=100Mi \
--set resources.limits.cpu=500m \
--set resources.limits.memory=300Mi \
--set-json 'livenessProbe={"initialDelaySeconds":30,"periodSeconds":60,"timeoutSeconds":10,"failureThreshold":5}' \
--set-json 'readinessProbe={"initialDelaySeconds":15,"periodSeconds":30,"timeoutSeconds":10,"failureThreshold":5}'
helm upgrade --install redis-operator ot-helm/redis-operator \ helm upgrade --install redis-operator ot-helm/redis-operator \
--version "${REDIS_OP_VERSION}" \ --version "${REDIS_OP_VERSION}" \
--namespace redis-operator --create-namespace --namespace redis-operator --create-namespace \
--set resources.requests.cpu=20m \
--set resources.requests.memory=30Mi \
--set resources.limits.cpu=500m \
--set resources.limits.memory=150Mi
helm upgrade --install mongodb-kubernetes mongodb/mongodb-kubernetes \ helm upgrade --install mongodb-kubernetes mongodb/mongodb-kubernetes \
--version "${MONGODB_OP_VERSION}" \ --version "${MONGODB_OP_VERSION}" \
--namespace mongodb-operator --create-namespace \ --namespace mongodb-operator --create-namespace \
--set operator.watchNamespace=juwan --set operator.watchNamespace=juwan \
--set operator.resources.requests.cpu=30m \
--set operator.resources.requests.memory=50Mi \
--set operator.resources.limits.cpu=500m \
--set operator.resources.limits.memory=200Mi
kubectl -n cnpg-system rollout status deploy/cnpg-controller-manager --timeout=300s kubectl -n cnpg-system rollout status deploy/cnpg-controller-manager --timeout=300s
kubectl -n kafka rollout status deploy/strimzi-cluster-operator --timeout=300s kubectl -n redpanda rollout status deploy/redpanda-controller-operator --timeout=300s
kubectl -n redis-operator rollout status deploy/redis-operator --timeout=300s kubectl -n redis-operator rollout status deploy/redis-operator --timeout=300s
kubectl -n mongodb-operator rollout status deploy/mongodb-kubernetes-operator --timeout=300s kubectl -n mongodb-operator rollout status deploy/mongodb-kubernetes-operator --timeout=300s
+5 -1
View File
@@ -69,7 +69,11 @@ kubectl -n juwan create secret tls chat-wt-tls \
--key="${DEV_CERTS}/tls.key" \ --key="${DEV_CERTS}/tls.key" \
--dry-run=client -o yaml | kubectl apply -f - --dry-run=client -o yaml | kubectl apply -f -
DOMAINS=(user player game shop order wallet community review dispute notification search chat) DOMAINS=()
while IFS= read -r name; do
DOMAINS+=("${name%-redis}")
done < <(grep -E '^ name: [a-z-]+-redis$' "$K01_DIR/infra/redis.yaml" | awk '{print $2}')
for d in "${DOMAINS[@]}"; do for d in "${DOMAINS[@]}"; do
pwd_val="$(openssl rand -hex 16)" pwd_val="$(openssl rand -hex 16)"
write_secret "redis-${d}-password" "$pwd_val" write_secret "redis-${d}-password" "$pwd_val"
@@ -30,8 +30,10 @@ spec:
value: "user-rpc-svc.juwan:8080" value: "user-rpc-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -65,8 +65,10 @@ spec:
readOnly: true readOnly: true
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
volumes: volumes:
- name: certs - name: certs
secret: secret:
@@ -60,8 +60,10 @@ spec:
value: "snowflake-svc.juwan:8080" value: "snowflake-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -115,8 +117,10 @@ spec:
value: "user-rpc-svc.juwan:8080" value: "user-rpc-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -60,8 +60,10 @@ spec:
value: "snowflake-svc.juwan:8080" value: "snowflake-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -117,8 +119,10 @@ spec:
value: "player-rpc-svc.juwan:8080" value: "player-rpc-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -36,11 +36,13 @@ spec:
name: user-redis name: user-redis
key: password key: password
- name: KAFKA_BROKER - name: KAFKA_BROKER
value: "juwan-kafka-kafka-bootstrap.kafka:9092" value: "juwan-kafka-0.juwan-kafka.redpanda:9092"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -87,7 +89,7 @@ spec:
containerPort: 4001 containerPort: 4001
env: env:
- name: KAFKA_BROKER - name: KAFKA_BROKER
value: "juwan-kafka-kafka-bootstrap.kafka:9092" value: "juwan-kafka-0.juwan-kafka.redpanda:9092"
- name: EMAIL_SMTP_HOST - name: EMAIL_SMTP_HOST
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
@@ -125,8 +127,10 @@ spec:
key: reply-to key: reply-to
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -25,8 +25,10 @@ spec:
containerPort: 3000 containerPort: 3000
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -60,8 +60,10 @@ spec:
value: "snowflake-svc.juwan:8080" value: "snowflake-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -113,8 +115,10 @@ spec:
value: "game-rpc-svc.juwan:8080" value: "game-rpc-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -60,8 +60,10 @@ spec:
value: "snowflake-svc.juwan:8080" value: "snowflake-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -113,8 +115,10 @@ spec:
value: "notification-rpc-svc.juwan:8080" value: "notification-rpc-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -53,8 +53,10 @@ spec:
key: region key: region
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -106,8 +108,10 @@ spec:
value: "objectstory-rpc-svc.juwan:8080" value: "objectstory-rpc-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -60,8 +60,10 @@ spec:
value: "snowflake-svc.juwan:8080" value: "snowflake-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -117,8 +119,10 @@ spec:
value: "shop-rpc-svc.juwan:8080" value: "shop-rpc-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -60,8 +60,10 @@ spec:
value: "snowflake-svc.juwan:8080" value: "snowflake-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -115,8 +117,10 @@ spec:
value: "user-rpc-svc.juwan:8080" value: "user-rpc-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -60,8 +60,10 @@ spec:
value: "snowflake-svc.juwan:8080" value: "snowflake-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -117,8 +119,10 @@ spec:
value: "review-rpc-svc.juwan:8080" value: "review-rpc-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -60,8 +60,10 @@ spec:
value: "snowflake-svc.juwan:8080" value: "snowflake-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -113,8 +115,10 @@ spec:
value: "search-rpc-svc.juwan:8080" value: "search-rpc-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -62,8 +62,10 @@ spec:
value: "user-rpc-svc.juwan:8080" value: "user-rpc-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -117,8 +119,10 @@ spec:
value: "shop-rpc-svc.juwan:8080" value: "shop-rpc-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -38,8 +38,10 @@ spec:
containerPort: 8080 containerPort: 8080
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -62,8 +62,10 @@ spec:
value: "user-rpc-svc.juwan:8080" value: "user-rpc-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -80,8 +80,10 @@ spec:
key: secret-key key: secret-key
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -135,8 +137,10 @@ spec:
value: "user-verifications-rpc-svc.juwan:8080" value: "user-verifications-rpc-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -60,8 +60,10 @@ spec:
value: "snowflake-svc.juwan:8080" value: "snowflake-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
@@ -113,8 +115,10 @@ spec:
value: "wallet-rpc-svc.juwan:8080" value: "wallet-rpc-svc.juwan:8080"
resources: resources:
requests: requests:
cpu: 20m cpu: 10m
memory: 32Mi memory: 32Mi
limits:
memory: 512Mi
--- ---
apiVersion: v1 apiVersion: v1
+33
View File
@@ -0,0 +1,33 @@
#!/usr/bin/env bash
# Tear down the k01 stack in dependency order:
#   1. service Deployments, 2. data-layer CRs (postgres/redis/mongo/redpanda),
#   3. network layer (envoy, ratelimit), 4. orphaned pods/PVCs.
# Requires: kubectl; KUBECONFIG defaults to the local k3s config.
set -euo pipefail

# Directory containing this script; service/infra manifests live beneath it.
K01_DIR="$(cd "$(dirname "$0")" && pwd)"
readonly K01_DIR
export KUBECONFIG="${KUBECONFIG:-/etc/rancher/k3s/k3s.yaml}"

echo services
for f in "${K01_DIR}/services/"*.yaml; do
  # Guard against an empty/missing services dir: without this the literal
  # glob pattern would be passed to kubectl, which fails and aborts under -e.
  [[ -e "$f" ]] || continue
  kubectl delete -f "$f" --ignore-not-found --wait=false
done

echo data crs
# Best-effort deletes: the CRDs may not be installed on a partial cluster,
# so stderr/exit codes are deliberately suppressed for each kind.
kubectl -n juwan delete cluster.postgresql.cnpg.io --all --wait=false 2>/dev/null || true
kubectl -n juwan delete redisreplication --all --wait=false 2>/dev/null || true
kubectl -n juwan delete redissentinel --all --wait=false 2>/dev/null || true
kubectl -n juwan delete mongodbcommunity --all --wait=false 2>/dev/null || true
kubectl -n redpanda delete topic --all --wait=false 2>/dev/null || true
kubectl -n redpanda delete redpanda --all --wait=false 2>/dev/null || true

echo network
kubectl delete -f "${K01_DIR}/infra/envoy.yaml" --ignore-not-found --wait=false
kubectl delete -f "${K01_DIR}/infra/ratelimit.yaml" --ignore-not-found --wait=false

# Give operators/finalizers time to process the async (--wait=false) deletes
# before force-removing whatever is left behind.
sleep 30

echo cleanup orphaned
kubectl -n juwan delete pod --all --force --grace-period=0 2>/dev/null || true
kubectl -n juwan delete pvc --all --wait=false 2>/dev/null || true
kubectl -n redpanda delete pvc -l app.kubernetes.io/instance=juwan-kafka --wait=false 2>/dev/null || true

# Final state report: remaining pods/PVCs and per-node allocated resources.
kubectl get pods,pvc -n juwan
kubectl get pods,pvc -n redpanda
kubectl describe node | grep -A 6 Allocated