From 4898aecd3b3a45d3c737bb65e1dc5776a10349e2 Mon Sep 17 00:00:00 2001
From: wwweww <2646787260@qq.com>
Date: Mon, 23 Feb 2026 15:54:33 +0800
Subject: [PATCH] refactor: rename user service to users; add envoy gateway, monitoring stack, and redis docs
---
.idea/.gitignore | 8 +
.idea/modules.xml | 8 +
.idea/st-1-example.iml | 9 +
.idea/vcs.xml | 6 +
app/user/api/internal/handler/routes.go | 25 -
app/user/api/internal/logic/userInfoLogic.go | 43 -
app/user/api/internal/svc/serviceContext.go | 23 -
app/user/api/internal/types/types.go | 13 -
app/user/rpc/etc/pb.yaml | 6 -
app/user/rpc/internal/config/config.go | 10 -
.../rpc/internal/logic/getUserInfoLogic.go | 41 -
.../rpc/internal/server/usercenterServer.go | 29 -
app/user/rpc/internal/svc/serviceContext.go | 13 -
app/user/rpc/pb/user.pb.go | 184 --
app/user/rpc/pb/user_grpc.pb.go | 121 --
app/user/rpc/usercenter/usercenter.go | 38 -
app/{user => users}/api/etc/user-api.yaml | 5 +
.../api/internal/config/config.go | 0
app/users/api/internal/handler/routes.go | 60 +
.../handler/user/getUserInfoHandler.go | 32 +
.../api/internal/handler/user/loginHandler.go | 32 +
.../internal/handler/user/logoutHandler.go | 32 +
.../internal/handler/user/registerHandler.go} | 18 +-
.../handler/user/updatePasswordHandler.go | 32 +
.../handler/user/updateUserInfoHandler.go | 32 +
.../internal/logic/user/getUserInfoLogic.go | 34 +
.../api/internal/logic/user/loginLogic.go | 34 +
.../api/internal/logic/user/logoutLogic.go | 34 +
.../api/internal/logic/user/registerLogic.go | 55 +
.../logic/user/updatePasswordLogic.go | 34 +
.../logic/user/updateUserInfoLogic.go | 34 +
.../internal/middleware/loggerMiddleware.go | 22 +
app/users/api/internal/svc/serviceContext.go | 27 +
app/users/api/internal/types/types.go | 81 +
app/{user => users}/api/user.go | 6 +-
app/users/rpc/etc/pb.yaml | 15 +
app/users/rpc/internal/config/config.go | 12 +
app/users/rpc/internal/logic/addUsersLogic.go | 31 +
app/users/rpc/internal/logic/delUsersLogic.go | 30 +
.../internal/logic/getUserByUsernameLogic.go | 30 +
.../rpc/internal/logic/getUsersByIdLogic.go | 30 +
.../rpc/internal/logic/searchUsersLogic.go | 30 +
.../rpc/internal/logic/updateUsersLogic.go | 30 +
app/users/rpc/internal/models/usersmodel.go | 27 +
.../rpc/internal/models/usersmodel_gen.go | 180 ++
app/users/rpc/internal/models/vars.go | 5 +
.../rpc/internal/server/usercenterServer.go | 55 +
app/users/rpc/internal/svc/serviceContext.go | 47 +
app/users/rpc/internal/utils/jwt.go | 30 +
app/{user => users}/rpc/pb.go | 10 +-
app/users/rpc/pb/users.pb.go | 1073 ++++++++++++
app/users/rpc/pb/users_grpc.pb.go | 313 ++++
app/users/rpc/usercenter/usercenter.go | 81 +
backup/user-rpc.yaml.backup | 233 +++
deploy/k8s/envoy/envoy.yaml | 157 ++
deploy/k8s/monitoring/00-namespace.yaml | 4 +
deploy/k8s/monitoring/grafana.yaml | 82 +
deploy/k8s/monitoring/loki.yaml | 90 +
deploy/k8s/monitoring/prometheus.yaml | 138 ++
deploy/k8s/monitoring/promtail.yaml | 108 ++
deploy/k8s/{base => }/postgreSql.yaml | 0
deploy/k8s/{ => service/user}/user-api.yaml | 0
deploy/k8s/{ => service/user}/user-rpc.yaml | 57 +-
deploy/script/init-secrets.sh | 3 +
desc/api/user.api | 26 -
desc/api/users.api | 124 ++
desc/rpc/user.proto | 18 -
desc/rpc/users.proto | 116 ++
desc/sql/users.sql | 23 +-
docs/README.md | 95 ++
docs/deployment-troubleshooting.md | 385 +++++
docs/gozero-redis-configuration.md | 1497 +++++++++++++++++
docs/kubernetes-service-explanation.md | 743 ++++++++
docs/redis-sentinel-troubleshooting.md | 779 +++++++++
docs/redis-services-guide.md | 1179 +++++++++++++
docs/redis-username-discovery.md | 1068 ++++++++++++
go.mod | 2 +
go.sum | 6 +
test.js | 27 -
79 files changed, 9520 insertions(+), 650 deletions(-)
create mode 100644 .idea/.gitignore
create mode 100644 .idea/modules.xml
create mode 100644 .idea/st-1-example.iml
create mode 100644 .idea/vcs.xml
delete mode 100644 app/user/api/internal/handler/routes.go
delete mode 100644 app/user/api/internal/logic/userInfoLogic.go
delete mode 100644 app/user/api/internal/svc/serviceContext.go
delete mode 100644 app/user/api/internal/types/types.go
delete mode 100644 app/user/rpc/etc/pb.yaml
delete mode 100644 app/user/rpc/internal/config/config.go
delete mode 100644 app/user/rpc/internal/logic/getUserInfoLogic.go
delete mode 100644 app/user/rpc/internal/server/usercenterServer.go
delete mode 100644 app/user/rpc/internal/svc/serviceContext.go
delete mode 100644 app/user/rpc/pb/user.pb.go
delete mode 100644 app/user/rpc/pb/user_grpc.pb.go
delete mode 100644 app/user/rpc/usercenter/usercenter.go
rename app/{user => users}/api/etc/user-api.yaml (62%)
rename app/{user => users}/api/internal/config/config.go (100%)
create mode 100644 app/users/api/internal/handler/routes.go
create mode 100644 app/users/api/internal/handler/user/getUserInfoHandler.go
create mode 100644 app/users/api/internal/handler/user/loginHandler.go
create mode 100644 app/users/api/internal/handler/user/logoutHandler.go
rename app/{user/api/internal/handler/userInfoHandler.go => users/api/internal/handler/user/registerHandler.go} (53%)
create mode 100644 app/users/api/internal/handler/user/updatePasswordHandler.go
create mode 100644 app/users/api/internal/handler/user/updateUserInfoHandler.go
create mode 100644 app/users/api/internal/logic/user/getUserInfoLogic.go
create mode 100644 app/users/api/internal/logic/user/loginLogic.go
create mode 100644 app/users/api/internal/logic/user/logoutLogic.go
create mode 100644 app/users/api/internal/logic/user/registerLogic.go
create mode 100644 app/users/api/internal/logic/user/updatePasswordLogic.go
create mode 100644 app/users/api/internal/logic/user/updateUserInfoLogic.go
create mode 100644 app/users/api/internal/middleware/loggerMiddleware.go
create mode 100644 app/users/api/internal/svc/serviceContext.go
create mode 100644 app/users/api/internal/types/types.go
rename app/{user => users}/api/user.go (79%)
create mode 100644 app/users/rpc/etc/pb.yaml
create mode 100644 app/users/rpc/internal/config/config.go
create mode 100644 app/users/rpc/internal/logic/addUsersLogic.go
create mode 100644 app/users/rpc/internal/logic/delUsersLogic.go
create mode 100644 app/users/rpc/internal/logic/getUserByUsernameLogic.go
create mode 100644 app/users/rpc/internal/logic/getUsersByIdLogic.go
create mode 100644 app/users/rpc/internal/logic/searchUsersLogic.go
create mode 100644 app/users/rpc/internal/logic/updateUsersLogic.go
create mode 100644 app/users/rpc/internal/models/usersmodel.go
create mode 100644 app/users/rpc/internal/models/usersmodel_gen.go
create mode 100644 app/users/rpc/internal/models/vars.go
create mode 100644 app/users/rpc/internal/server/usercenterServer.go
create mode 100644 app/users/rpc/internal/svc/serviceContext.go
create mode 100644 app/users/rpc/internal/utils/jwt.go
rename app/{user => users}/rpc/pb.go (76%)
create mode 100644 app/users/rpc/pb/users.pb.go
create mode 100644 app/users/rpc/pb/users_grpc.pb.go
create mode 100644 app/users/rpc/usercenter/usercenter.go
create mode 100644 backup/user-rpc.yaml.backup
create mode 100644 deploy/k8s/envoy/envoy.yaml
create mode 100644 deploy/k8s/monitoring/00-namespace.yaml
create mode 100644 deploy/k8s/monitoring/grafana.yaml
create mode 100644 deploy/k8s/monitoring/loki.yaml
create mode 100644 deploy/k8s/monitoring/prometheus.yaml
create mode 100644 deploy/k8s/monitoring/promtail.yaml
rename deploy/k8s/{base => }/postgreSql.yaml (100%)
rename deploy/k8s/{ => service/user}/user-api.yaml (100%)
rename deploy/k8s/{ => service/user}/user-rpc.yaml (70%)
create mode 100644 deploy/script/init-secrets.sh
delete mode 100644 desc/api/user.api
create mode 100644 desc/api/users.api
delete mode 100644 desc/rpc/user.proto
create mode 100644 desc/rpc/users.proto
create mode 100644 docs/README.md
create mode 100644 docs/deployment-troubleshooting.md
create mode 100644 docs/gozero-redis-configuration.md
create mode 100644 docs/kubernetes-service-explanation.md
create mode 100644 docs/redis-sentinel-troubleshooting.md
create mode 100644 docs/redis-services-guide.md
create mode 100644 docs/redis-username-discovery.md
delete mode 100644 test.js
diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000..1c2fda5
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000..7fbf49e
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/st-1-example.iml b/.idea/st-1-example.iml
new file mode 100644
index 0000000..338a266
--- /dev/null
+++ b/.idea/st-1-example.iml
@@ -0,0 +1,9 @@
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..c8397c9
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/app/user/api/internal/handler/routes.go b/app/user/api/internal/handler/routes.go
deleted file mode 100644
index a6b807a..0000000
--- a/app/user/api/internal/handler/routes.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Code generated by goctl. DO NOT EDIT.
-// goctl 1.9.2
-
-package handler
-
-import (
- "net/http"
-
- "juwan-backend/app/user/api/internal/svc"
-
- "github.com/zeromicro/go-zero/rest"
-)
-
-func RegisterHandlers(server *rest.Server, serverCtx *svc.ServiceContext) {
- server.AddRoutes(
- []rest.Route{
- {
- // Get user infomaction by user id
- Method: http.MethodPost,
- Path: "/user/info",
- Handler: userInfoHandler(serverCtx),
- },
- },
- )
-}
diff --git a/app/user/api/internal/logic/userInfoLogic.go b/app/user/api/internal/logic/userInfoLogic.go
deleted file mode 100644
index 5a3e5ce..0000000
--- a/app/user/api/internal/logic/userInfoLogic.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Code scaffolded by goctl. Safe to edit.
-// goctl 1.9.2
-
-package logic
-
-import (
- "context"
-
- "juwan-backend/app/user/api/internal/svc"
- "juwan-backend/app/user/api/internal/types"
- "juwan-backend/app/user/rpc/usercenter"
-
- "github.com/zeromicro/go-zero/core/logx"
-)
-
-type UserInfoLogic struct {
- logx.Logger
- ctx context.Context
- svcCtx *svc.ServiceContext
-}
-
-// Get user infomaction by user id
-func NewUserInfoLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UserInfoLogic {
- return &UserInfoLogic{
- Logger: logx.WithContext(ctx),
- ctx: ctx,
- svcCtx: svcCtx,
- }
-}
-
-func (l *UserInfoLogic) UserInfo(req *types.UserInfoReq) (resp *types.UserInfoResp, err error) {
- logx.Infof("Request user info, user id: %d", req.UserId)
- res, err := l.svcCtx.Usercenter.GetUserInfo(l.ctx, &usercenter.GetUserInfoReq{
- Id: req.UserId,
- })
- if err != nil {
- return nil, err
- }
- return &types.UserInfoResp{
- UserId: res.Id,
- Nickname: res.Nickname,
- }, nil
-}
diff --git a/app/user/api/internal/svc/serviceContext.go b/app/user/api/internal/svc/serviceContext.go
deleted file mode 100644
index 0f28d14..0000000
--- a/app/user/api/internal/svc/serviceContext.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Code scaffolded by goctl. Safe to edit.
-// goctl 1.9.2
-
-package svc
-
-import (
- "juwan-backend/app/user/api/internal/config"
- "juwan-backend/app/user/rpc/usercenter"
-
- "github.com/zeromicro/go-zero/zrpc"
-)
-
-type ServiceContext struct {
- Config config.Config
- Usercenter usercenter.Usercenter
-}
-
-func NewServiceContext(c config.Config) *ServiceContext {
- return &ServiceContext{
- Config: c,
- Usercenter: usercenter.NewUsercenter(zrpc.MustNewClient(c.UsercenterRpcConf)),
- }
-}
diff --git a/app/user/api/internal/types/types.go b/app/user/api/internal/types/types.go
deleted file mode 100644
index 043a1f8..0000000
--- a/app/user/api/internal/types/types.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Code generated by goctl. DO NOT EDIT.
-// goctl 1.9.2
-
-package types
-
-type UserInfoReq struct {
- UserId int64 `json:"userId"`
-}
-
-type UserInfoResp struct {
- UserId int64 `json:"userId"`
- Nickname string `json:"nickname"`
-}
diff --git a/app/user/rpc/etc/pb.yaml b/app/user/rpc/etc/pb.yaml
deleted file mode 100644
index de73da4..0000000
--- a/app/user/rpc/etc/pb.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-Name: pb.rpc
-ListenOn: 0.0.0.0:9001
-
-# UserDB: "${DB_URI}?sslmode=disable"
-DB:
- UserDB: "${DB_URI}?sslmode=disable"
diff --git a/app/user/rpc/internal/config/config.go b/app/user/rpc/internal/config/config.go
deleted file mode 100644
index 98e5d8d..0000000
--- a/app/user/rpc/internal/config/config.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package config
-
-import "github.com/zeromicro/go-zero/zrpc"
-
-type Config struct {
- zrpc.RpcServerConf
- DB struct {
- UserDB string
- }
-}
diff --git a/app/user/rpc/internal/logic/getUserInfoLogic.go b/app/user/rpc/internal/logic/getUserInfoLogic.go
deleted file mode 100644
index ac4144f..0000000
--- a/app/user/rpc/internal/logic/getUserInfoLogic.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package logic
-
-import (
- "context"
-
- "juwan-backend/app/user/rpc/internal/svc"
- "juwan-backend/app/user/rpc/pb"
-
- "github.com/zeromicro/go-zero/core/logx"
-)
-
-type GetUserInfoLogic struct {
- ctx context.Context
- svcCtx *svc.ServiceContext
- logx.Logger
-}
-
-func NewGetUserInfoLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetUserInfoLogic {
- return &GetUserInfoLogic{
- ctx: ctx,
- svcCtx: svcCtx,
- Logger: logx.WithContext(ctx),
- }
-}
-
-func (l *GetUserInfoLogic) GetUserInfo(in *pb.GetUserInfoReq) (*pb.GetUserInfoResp, error) {
- users := map[int64]string{
- 1: "WangHuahua",
- 2: "LiKunkun",
- }
-
- nikename := "Unknow"
- if name, ok := users[in.Id]; ok {
- nikename = name
- }
-
- return &pb.GetUserInfoResp{
- Id: in.Id,
- Nickname: nikename,
- }, nil
-}
diff --git a/app/user/rpc/internal/server/usercenterServer.go b/app/user/rpc/internal/server/usercenterServer.go
deleted file mode 100644
index 5cdf98c..0000000
--- a/app/user/rpc/internal/server/usercenterServer.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Code generated by goctl. DO NOT EDIT.
-// goctl 1.9.2
-// Source: user.proto
-
-package server
-
-import (
- "context"
-
- "juwan-backend/app/user/rpc/internal/logic"
- "juwan-backend/app/user/rpc/internal/svc"
- "juwan-backend/app/user/rpc/pb"
-)
-
-type UsercenterServer struct {
- svcCtx *svc.ServiceContext
- pb.UnimplementedUsercenterServer
-}
-
-func NewUsercenterServer(svcCtx *svc.ServiceContext) *UsercenterServer {
- return &UsercenterServer{
- svcCtx: svcCtx,
- }
-}
-
-func (s *UsercenterServer) GetUserInfo(ctx context.Context, in *pb.GetUserInfoReq) (*pb.GetUserInfoResp, error) {
- l := logic.NewGetUserInfoLogic(ctx, s.svcCtx)
- return l.GetUserInfo(in)
-}
diff --git a/app/user/rpc/internal/svc/serviceContext.go b/app/user/rpc/internal/svc/serviceContext.go
deleted file mode 100644
index 3b889b6..0000000
--- a/app/user/rpc/internal/svc/serviceContext.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package svc
-
-import "juwan-backend/app/user/rpc/internal/config"
-
-type ServiceContext struct {
- Config config.Config
-}
-
-func NewServiceContext(c config.Config) *ServiceContext {
- return &ServiceContext{
- Config: c,
- }
-}
diff --git a/app/user/rpc/pb/user.pb.go b/app/user/rpc/pb/user.pb.go
deleted file mode 100644
index bb5eedf..0000000
--- a/app/user/rpc/pb/user.pb.go
+++ /dev/null
@@ -1,184 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.36.9
-// protoc v6.32.0
-// source: user.proto
-
-package pb
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
- unsafe "unsafe"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type GetUserInfoReq struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *GetUserInfoReq) Reset() {
- *x = GetUserInfoReq{}
- mi := &file_user_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *GetUserInfoReq) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetUserInfoReq) ProtoMessage() {}
-
-func (x *GetUserInfoReq) ProtoReflect() protoreflect.Message {
- mi := &file_user_proto_msgTypes[0]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetUserInfoReq.ProtoReflect.Descriptor instead.
-func (*GetUserInfoReq) Descriptor() ([]byte, []int) {
- return file_user_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *GetUserInfoReq) GetId() int64 {
- if x != nil {
- return x.Id
- }
- return 0
-}
-
-type GetUserInfoResp struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
- Nickname string `protobuf:"bytes,2,opt,name=nickname,proto3" json:"nickname,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *GetUserInfoResp) Reset() {
- *x = GetUserInfoResp{}
- mi := &file_user_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *GetUserInfoResp) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetUserInfoResp) ProtoMessage() {}
-
-func (x *GetUserInfoResp) ProtoReflect() protoreflect.Message {
- mi := &file_user_proto_msgTypes[1]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetUserInfoResp.ProtoReflect.Descriptor instead.
-func (*GetUserInfoResp) Descriptor() ([]byte, []int) {
- return file_user_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *GetUserInfoResp) GetId() int64 {
- if x != nil {
- return x.Id
- }
- return 0
-}
-
-func (x *GetUserInfoResp) GetNickname() string {
- if x != nil {
- return x.Nickname
- }
- return ""
-}
-
-var File_user_proto protoreflect.FileDescriptor
-
-const file_user_proto_rawDesc = "" +
- "\n" +
- "\n" +
- "user.proto\x12\x02pb\" \n" +
- "\x0eGetUserInfoReq\x12\x0e\n" +
- "\x02id\x18\x01 \x01(\x03R\x02id\"=\n" +
- "\x0fGetUserInfoResp\x12\x0e\n" +
- "\x02id\x18\x01 \x01(\x03R\x02id\x12\x1a\n" +
- "\bnickname\x18\x02 \x01(\tR\bnickname2D\n" +
- "\n" +
- "Usercenter\x126\n" +
- "\vGetUserInfo\x12\x12.pb.GetUserInfoReq\x1a\x13.pb.GetUserInfoRespB\x06Z\x04./pbb\x06proto3"
-
-var (
- file_user_proto_rawDescOnce sync.Once
- file_user_proto_rawDescData []byte
-)
-
-func file_user_proto_rawDescGZIP() []byte {
- file_user_proto_rawDescOnce.Do(func() {
- file_user_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_user_proto_rawDesc), len(file_user_proto_rawDesc)))
- })
- return file_user_proto_rawDescData
-}
-
-var file_user_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_user_proto_goTypes = []any{
- (*GetUserInfoReq)(nil), // 0: pb.GetUserInfoReq
- (*GetUserInfoResp)(nil), // 1: pb.GetUserInfoResp
-}
-var file_user_proto_depIdxs = []int32{
- 0, // 0: pb.Usercenter.GetUserInfo:input_type -> pb.GetUserInfoReq
- 1, // 1: pb.Usercenter.GetUserInfo:output_type -> pb.GetUserInfoResp
- 1, // [1:2] is the sub-list for method output_type
- 0, // [0:1] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_user_proto_init() }
-func file_user_proto_init() {
- if File_user_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_user_proto_rawDesc), len(file_user_proto_rawDesc)),
- NumEnums: 0,
- NumMessages: 2,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_user_proto_goTypes,
- DependencyIndexes: file_user_proto_depIdxs,
- MessageInfos: file_user_proto_msgTypes,
- }.Build()
- File_user_proto = out.File
- file_user_proto_goTypes = nil
- file_user_proto_depIdxs = nil
-}
diff --git a/app/user/rpc/pb/user_grpc.pb.go b/app/user/rpc/pb/user_grpc.pb.go
deleted file mode 100644
index bd44062..0000000
--- a/app/user/rpc/pb/user_grpc.pb.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-grpc v1.5.1
-// - protoc v6.32.0
-// source: user.proto
-
-package pb
-
-import (
- context "context"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-)
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.64.0 or later.
-const _ = grpc.SupportPackageIsVersion9
-
-const (
- Usercenter_GetUserInfo_FullMethodName = "/pb.Usercenter/GetUserInfo"
-)
-
-// UsercenterClient is the client API for Usercenter service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type UsercenterClient interface {
- GetUserInfo(ctx context.Context, in *GetUserInfoReq, opts ...grpc.CallOption) (*GetUserInfoResp, error)
-}
-
-type usercenterClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewUsercenterClient(cc grpc.ClientConnInterface) UsercenterClient {
- return &usercenterClient{cc}
-}
-
-func (c *usercenterClient) GetUserInfo(ctx context.Context, in *GetUserInfoReq, opts ...grpc.CallOption) (*GetUserInfoResp, error) {
- cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
- out := new(GetUserInfoResp)
- err := c.cc.Invoke(ctx, Usercenter_GetUserInfo_FullMethodName, in, out, cOpts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// UsercenterServer is the server API for Usercenter service.
-// All implementations must embed UnimplementedUsercenterServer
-// for forward compatibility.
-type UsercenterServer interface {
- GetUserInfo(context.Context, *GetUserInfoReq) (*GetUserInfoResp, error)
- mustEmbedUnimplementedUsercenterServer()
-}
-
-// UnimplementedUsercenterServer must be embedded to have
-// forward compatible implementations.
-//
-// NOTE: this should be embedded by value instead of pointer to avoid a nil
-// pointer dereference when methods are called.
-type UnimplementedUsercenterServer struct{}
-
-func (UnimplementedUsercenterServer) GetUserInfo(context.Context, *GetUserInfoReq) (*GetUserInfoResp, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetUserInfo not implemented")
-}
-func (UnimplementedUsercenterServer) mustEmbedUnimplementedUsercenterServer() {}
-func (UnimplementedUsercenterServer) testEmbeddedByValue() {}
-
-// UnsafeUsercenterServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to UsercenterServer will
-// result in compilation errors.
-type UnsafeUsercenterServer interface {
- mustEmbedUnimplementedUsercenterServer()
-}
-
-func RegisterUsercenterServer(s grpc.ServiceRegistrar, srv UsercenterServer) {
- // If the following call pancis, it indicates UnimplementedUsercenterServer was
- // embedded by pointer and is nil. This will cause panics if an
- // unimplemented method is ever invoked, so we test this at initialization
- // time to prevent it from happening at runtime later due to I/O.
- if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
- t.testEmbeddedByValue()
- }
- s.RegisterService(&Usercenter_ServiceDesc, srv)
-}
-
-func _Usercenter_GetUserInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetUserInfoReq)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(UsercenterServer).GetUserInfo(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: Usercenter_GetUserInfo_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(UsercenterServer).GetUserInfo(ctx, req.(*GetUserInfoReq))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-// Usercenter_ServiceDesc is the grpc.ServiceDesc for Usercenter service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var Usercenter_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "pb.Usercenter",
- HandlerType: (*UsercenterServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "GetUserInfo",
- Handler: _Usercenter_GetUserInfo_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "user.proto",
-}
diff --git a/app/user/rpc/usercenter/usercenter.go b/app/user/rpc/usercenter/usercenter.go
deleted file mode 100644
index 9159075..0000000
--- a/app/user/rpc/usercenter/usercenter.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Code generated by goctl. DO NOT EDIT.
-// goctl 1.9.2
-// Source: user.proto
-
-package usercenter
-
-import (
- "context"
-
- "juwan-backend/app/user/rpc/pb"
-
- "github.com/zeromicro/go-zero/zrpc"
- "google.golang.org/grpc"
-)
-
-type (
- GetUserInfoReq = pb.GetUserInfoReq
- GetUserInfoResp = pb.GetUserInfoResp
-
- Usercenter interface {
- GetUserInfo(ctx context.Context, in *GetUserInfoReq, opts ...grpc.CallOption) (*GetUserInfoResp, error)
- }
-
- defaultUsercenter struct {
- cli zrpc.Client
- }
-)
-
-func NewUsercenter(cli zrpc.Client) Usercenter {
- return &defaultUsercenter{
- cli: cli,
- }
-}
-
-func (m *defaultUsercenter) GetUserInfo(ctx context.Context, in *GetUserInfoReq, opts ...grpc.CallOption) (*GetUserInfoResp, error) {
- client := pb.NewUsercenterClient(m.cli.Conn())
- return client.GetUserInfo(ctx, in, opts...)
-}
diff --git a/app/user/api/etc/user-api.yaml b/app/users/api/etc/user-api.yaml
similarity index 62%
rename from app/user/api/etc/user-api.yaml
rename to app/users/api/etc/user-api.yaml
index c208de4..3db86be 100644
--- a/app/user/api/etc/user-api.yaml
+++ b/app/users/api/etc/user-api.yaml
@@ -2,5 +2,10 @@ Name: user-api
Host: 0.0.0.0
Port: 8888
+Prometheus:
+ Host: 0.0.0.0
+ Port: 4001
+ Path: /metrics
+
UsercenterRpcConf:
Target: k8s://juwan/user-rpc-svc:9001
diff --git a/app/user/api/internal/config/config.go b/app/users/api/internal/config/config.go
similarity index 100%
rename from app/user/api/internal/config/config.go
rename to app/users/api/internal/config/config.go
diff --git a/app/users/api/internal/handler/routes.go b/app/users/api/internal/handler/routes.go
new file mode 100644
index 0000000..9d380a8
--- /dev/null
+++ b/app/users/api/internal/handler/routes.go
@@ -0,0 +1,60 @@
+// Code generated by goctl. DO NOT EDIT.
+// goctl 1.9.2
+
+package handler
+
+import (
+ "net/http"
+
+ user "juwan-backend/app/users/api/internal/handler/user"
+ "juwan-backend/app/users/api/internal/svc"
+
+ "github.com/zeromicro/go-zero/rest"
+)
+
+func RegisterHandlers(server *rest.Server, serverCtx *svc.ServiceContext) {
+ server.AddRoutes(
+ rest.WithMiddlewares(
+ []rest.Middleware{serverCtx.Logger},
+ []rest.Route{
+ {
+ // 获取用户信息
+ Method: http.MethodGet,
+ Path: "/:userId",
+ Handler: user.GetUserInfoHandler(serverCtx),
+ },
+ {
+ // 修改用户信息
+ Method: http.MethodPut,
+ Path: "/:userId",
+ Handler: user.UpdateUserInfoHandler(serverCtx),
+ },
+ {
+ // 用户登出
+ Method: http.MethodPost,
+ Path: "/:userId/logout",
+ Handler: user.LogoutHandler(serverCtx),
+ },
+ {
+ // 修改用户密码
+ Method: http.MethodPut,
+ Path: "/:userId/password",
+ Handler: user.UpdatePasswordHandler(serverCtx),
+ },
+ {
+ // 用户登录接口
+ Method: http.MethodPost,
+ Path: "/login",
+ Handler: user.LoginHandler(serverCtx),
+ },
+ {
+ // 用户注册接口
+ Method: http.MethodPost,
+ Path: "/register",
+ Handler: user.RegisterHandler(serverCtx),
+ },
+ }...,
+ ),
+ rest.WithPrefix("/api/users"),
+ )
+}
diff --git a/app/users/api/internal/handler/user/getUserInfoHandler.go b/app/users/api/internal/handler/user/getUserInfoHandler.go
new file mode 100644
index 0000000..c6b9180
--- /dev/null
+++ b/app/users/api/internal/handler/user/getUserInfoHandler.go
@@ -0,0 +1,32 @@
+// Code scaffolded by goctl. Safe to edit.
+// goctl 1.9.2
+
+package user
+
+import (
+ "net/http"
+
+ "github.com/zeromicro/go-zero/rest/httpx"
+ "juwan-backend/app/users/api/internal/logic/user"
+ "juwan-backend/app/users/api/internal/svc"
+ "juwan-backend/app/users/api/internal/types"
+)
+
+// 获取用户信息
+func GetUserInfoHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ var req types.GetUserInfoReq
+ if err := httpx.Parse(r, &req); err != nil {
+ httpx.ErrorCtx(r.Context(), w, err)
+ return
+ }
+
+ l := user.NewGetUserInfoLogic(r.Context(), svcCtx)
+ resp, err := l.GetUserInfo(&req)
+ if err != nil {
+ httpx.ErrorCtx(r.Context(), w, err)
+ } else {
+ httpx.OkJsonCtx(r.Context(), w, resp)
+ }
+ }
+}
diff --git a/app/users/api/internal/handler/user/loginHandler.go b/app/users/api/internal/handler/user/loginHandler.go
new file mode 100644
index 0000000..d9bd694
--- /dev/null
+++ b/app/users/api/internal/handler/user/loginHandler.go
@@ -0,0 +1,32 @@
+// Code scaffolded by goctl. Safe to edit.
+// goctl 1.9.2
+
+package user
+
+import (
+ "net/http"
+
+ "github.com/zeromicro/go-zero/rest/httpx"
+ "juwan-backend/app/users/api/internal/logic/user"
+ "juwan-backend/app/users/api/internal/svc"
+ "juwan-backend/app/users/api/internal/types"
+)
+
+// 用户登录接口
+func LoginHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ var req types.LoginReq
+ if err := httpx.Parse(r, &req); err != nil {
+ httpx.ErrorCtx(r.Context(), w, err)
+ return
+ }
+
+ l := user.NewLoginLogic(r.Context(), svcCtx)
+ resp, err := l.Login(&req)
+ if err != nil {
+ httpx.ErrorCtx(r.Context(), w, err)
+ } else {
+ httpx.OkJsonCtx(r.Context(), w, resp)
+ }
+ }
+}
diff --git a/app/users/api/internal/handler/user/logoutHandler.go b/app/users/api/internal/handler/user/logoutHandler.go
new file mode 100644
index 0000000..1dd45f1
--- /dev/null
+++ b/app/users/api/internal/handler/user/logoutHandler.go
@@ -0,0 +1,32 @@
+// Code scaffolded by goctl. Safe to edit.
+// goctl 1.9.2
+
+package user
+
+import (
+ "net/http"
+
+ "github.com/zeromicro/go-zero/rest/httpx"
+ "juwan-backend/app/users/api/internal/logic/user"
+ "juwan-backend/app/users/api/internal/svc"
+ "juwan-backend/app/users/api/internal/types"
+)
+
+// 用户登出
+func LogoutHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ var req types.LogoutReq
+ if err := httpx.Parse(r, &req); err != nil {
+ httpx.ErrorCtx(r.Context(), w, err)
+ return
+ }
+
+ l := user.NewLogoutLogic(r.Context(), svcCtx)
+ resp, err := l.Logout(&req)
+ if err != nil {
+ httpx.ErrorCtx(r.Context(), w, err)
+ } else {
+ httpx.OkJsonCtx(r.Context(), w, resp)
+ }
+ }
+}
diff --git a/app/user/api/internal/handler/userInfoHandler.go b/app/users/api/internal/handler/user/registerHandler.go
similarity index 53%
rename from app/user/api/internal/handler/userInfoHandler.go
rename to app/users/api/internal/handler/user/registerHandler.go
index a0603d7..7601653 100644
--- a/app/user/api/internal/handler/userInfoHandler.go
+++ b/app/users/api/internal/handler/user/registerHandler.go
@@ -1,28 +1,28 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
-package handler
+package user
import (
"net/http"
"github.com/zeromicro/go-zero/rest/httpx"
- "juwan-backend/app/user/api/internal/logic"
- "juwan-backend/app/user/api/internal/svc"
- "juwan-backend/app/user/api/internal/types"
+ "juwan-backend/app/users/api/internal/logic/user"
+ "juwan-backend/app/users/api/internal/svc"
+ "juwan-backend/app/users/api/internal/types"
)
-// Get user infomaction by user id
-func userInfoHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
+// 用户注册接口
+func RegisterHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
- var req types.UserInfoReq
+ var req types.RegisterReq
if err := httpx.Parse(r, &req); err != nil {
httpx.ErrorCtx(r.Context(), w, err)
return
}
- l := logic.NewUserInfoLogic(r.Context(), svcCtx)
- resp, err := l.UserInfo(&req)
+ l := user.NewRegisterLogic(r.Context(), svcCtx)
+ resp, err := l.Register(&req)
if err != nil {
httpx.ErrorCtx(r.Context(), w, err)
} else {
diff --git a/app/users/api/internal/handler/user/updatePasswordHandler.go b/app/users/api/internal/handler/user/updatePasswordHandler.go
new file mode 100644
index 0000000..db1b50c
--- /dev/null
+++ b/app/users/api/internal/handler/user/updatePasswordHandler.go
@@ -0,0 +1,32 @@
+// Code scaffolded by goctl. Safe to edit.
+// goctl 1.9.2
+
+package user
+
+import (
+ "net/http"
+
+ "github.com/zeromicro/go-zero/rest/httpx"
+ "juwan-backend/app/users/api/internal/logic/user"
+ "juwan-backend/app/users/api/internal/svc"
+ "juwan-backend/app/users/api/internal/types"
+)
+
+// 修改用户密码
+func UpdatePasswordHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ var req types.UpdatePasswordReq
+ if err := httpx.Parse(r, &req); err != nil {
+ httpx.ErrorCtx(r.Context(), w, err)
+ return
+ }
+
+ l := user.NewUpdatePasswordLogic(r.Context(), svcCtx)
+ resp, err := l.UpdatePassword(&req)
+ if err != nil {
+ httpx.ErrorCtx(r.Context(), w, err)
+ } else {
+ httpx.OkJsonCtx(r.Context(), w, resp)
+ }
+ }
+}
diff --git a/app/users/api/internal/handler/user/updateUserInfoHandler.go b/app/users/api/internal/handler/user/updateUserInfoHandler.go
new file mode 100644
index 0000000..1a1aad3
--- /dev/null
+++ b/app/users/api/internal/handler/user/updateUserInfoHandler.go
@@ -0,0 +1,32 @@
+// Code scaffolded by goctl. Safe to edit.
+// goctl 1.9.2
+
+package user
+
+import (
+ "net/http"
+
+ "github.com/zeromicro/go-zero/rest/httpx"
+ "juwan-backend/app/users/api/internal/logic/user"
+ "juwan-backend/app/users/api/internal/svc"
+ "juwan-backend/app/users/api/internal/types"
+)
+
+// 修改用户信息
+func UpdateUserInfoHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ var req types.UpdateUserInfoReq
+ if err := httpx.Parse(r, &req); err != nil {
+ httpx.ErrorCtx(r.Context(), w, err)
+ return
+ }
+
+ l := user.NewUpdateUserInfoLogic(r.Context(), svcCtx)
+ resp, err := l.UpdateUserInfo(&req)
+ if err != nil {
+ httpx.ErrorCtx(r.Context(), w, err)
+ } else {
+ httpx.OkJsonCtx(r.Context(), w, resp)
+ }
+ }
+}
diff --git a/app/users/api/internal/logic/user/getUserInfoLogic.go b/app/users/api/internal/logic/user/getUserInfoLogic.go
new file mode 100644
index 0000000..9422789
--- /dev/null
+++ b/app/users/api/internal/logic/user/getUserInfoLogic.go
@@ -0,0 +1,34 @@
+// Code scaffolded by goctl. Safe to edit.
+// goctl 1.9.2
+
+package user
+
+import (
+ "context"
+
+ "juwan-backend/app/users/api/internal/svc"
+ "juwan-backend/app/users/api/internal/types"
+
+ "github.com/zeromicro/go-zero/core/logx"
+)
+
+type GetUserInfoLogic struct {
+ logx.Logger
+ ctx context.Context
+ svcCtx *svc.ServiceContext
+}
+
+// 获取用户信息
+func NewGetUserInfoLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetUserInfoLogic {
+ return &GetUserInfoLogic{
+ Logger: logx.WithContext(ctx),
+ ctx: ctx,
+ svcCtx: svcCtx,
+ }
+}
+
+func (l *GetUserInfoLogic) GetUserInfo(req *types.GetUserInfoReq) (resp *types.UserInfo, err error) {
+ // todo: add your logic here and delete this line
+
+ return
+}
diff --git a/app/users/api/internal/logic/user/loginLogic.go b/app/users/api/internal/logic/user/loginLogic.go
new file mode 100644
index 0000000..08739d5
--- /dev/null
+++ b/app/users/api/internal/logic/user/loginLogic.go
@@ -0,0 +1,34 @@
+// Code scaffolded by goctl. Safe to edit.
+// goctl 1.9.2
+
+package user
+
+import (
+ "context"
+
+ "juwan-backend/app/users/api/internal/svc"
+ "juwan-backend/app/users/api/internal/types"
+
+ "github.com/zeromicro/go-zero/core/logx"
+)
+
+type LoginLogic struct {
+ logx.Logger
+ ctx context.Context
+ svcCtx *svc.ServiceContext
+}
+
+// 用户登录接口
+func NewLoginLogic(ctx context.Context, svcCtx *svc.ServiceContext) *LoginLogic {
+ return &LoginLogic{
+ Logger: logx.WithContext(ctx),
+ ctx: ctx,
+ svcCtx: svcCtx,
+ }
+}
+
+func (l *LoginLogic) Login(req *types.LoginReq) (resp *types.LoginResp, err error) {
+ // todo: add your logic here and delete this line
+
+ return
+}
diff --git a/app/users/api/internal/logic/user/logoutLogic.go b/app/users/api/internal/logic/user/logoutLogic.go
new file mode 100644
index 0000000..6a5f334
--- /dev/null
+++ b/app/users/api/internal/logic/user/logoutLogic.go
@@ -0,0 +1,34 @@
+// Code scaffolded by goctl. Safe to edit.
+// goctl 1.9.2
+
+package user
+
+import (
+ "context"
+
+ "juwan-backend/app/users/api/internal/svc"
+ "juwan-backend/app/users/api/internal/types"
+
+ "github.com/zeromicro/go-zero/core/logx"
+)
+
+type LogoutLogic struct {
+ logx.Logger
+ ctx context.Context
+ svcCtx *svc.ServiceContext
+}
+
+// 用户登出
+func NewLogoutLogic(ctx context.Context, svcCtx *svc.ServiceContext) *LogoutLogic {
+ return &LogoutLogic{
+ Logger: logx.WithContext(ctx),
+ ctx: ctx,
+ svcCtx: svcCtx,
+ }
+}
+
+func (l *LogoutLogic) Logout(req *types.LogoutReq) (resp *types.LogoutResp, err error) {
+ // todo: add your logic here and delete this line
+
+ return
+}
diff --git a/app/users/api/internal/logic/user/registerLogic.go b/app/users/api/internal/logic/user/registerLogic.go
new file mode 100644
index 0000000..7ae4f5e
--- /dev/null
+++ b/app/users/api/internal/logic/user/registerLogic.go
@@ -0,0 +1,55 @@
+// Code scaffolded by goctl. Safe to edit.
+// goctl 1.9.2
+
+package user
+
+import (
+ "context"
+ "errors"
+
+ "juwan-backend/app/users/api/internal/svc"
+ "juwan-backend/app/users/api/internal/types"
+ "juwan-backend/app/users/rpc/pb"
+
+ "github.com/google/uuid"
+ "github.com/zeromicro/go-zero/core/logx"
+)
+
+type RegisterLogic struct {
+ logx.Logger
+ ctx context.Context
+ svcCtx *svc.ServiceContext
+}
+
+// 用户注册接口
+func NewRegisterLogic(ctx context.Context, svcCtx *svc.ServiceContext) *RegisterLogic {
+ return &RegisterLogic{
+ Logger: logx.WithContext(ctx),
+ ctx: ctx,
+ svcCtx: svcCtx,
+ }
+}
+
+func (l *RegisterLogic) Register(req *types.RegisterReq) (resp *types.RegisterResp, err error) {
+ // todo: add your logic here and delete this line
+ user, err := l.svcCtx.UserRpc.GetUserByUsername(l.ctx, &pb.GetUserByUsernameReq{
+ Username: req.Username,
+ })
+ if err == nil || user != nil {
+ return nil, errors.New("User is exisit")
+ }
+ id, err := uuid.NewRandom()
+ if err != nil {
+ return nil, errors.New("Register is failed")
+ }
+
+ _, err = l.svcCtx.UserRpc.AddUsers(l.ctx, &pb.AddUsersReq{
+ UserId: id.String(),
+ Username: req.Username,
+ Passwd: req.Password,
+ Phone: req.Phone,
+ State: true,
+ })
+
+ return
+}
diff --git a/app/users/api/internal/logic/user/updatePasswordLogic.go b/app/users/api/internal/logic/user/updatePasswordLogic.go
new file mode 100644
index 0000000..b418e30
--- /dev/null
+++ b/app/users/api/internal/logic/user/updatePasswordLogic.go
@@ -0,0 +1,34 @@
+// Code scaffolded by goctl. Safe to edit.
+// goctl 1.9.2
+
+package user
+
+import (
+ "context"
+
+ "juwan-backend/app/users/api/internal/svc"
+ "juwan-backend/app/users/api/internal/types"
+
+ "github.com/zeromicro/go-zero/core/logx"
+)
+
+type UpdatePasswordLogic struct {
+ logx.Logger
+ ctx context.Context
+ svcCtx *svc.ServiceContext
+}
+
+// 修改用户密码
+func NewUpdatePasswordLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UpdatePasswordLogic {
+ return &UpdatePasswordLogic{
+ Logger: logx.WithContext(ctx),
+ ctx: ctx,
+ svcCtx: svcCtx,
+ }
+}
+
+func (l *UpdatePasswordLogic) UpdatePassword(req *types.UpdatePasswordReq) (resp *types.UpdatePasswordResp, err error) {
+ // todo: add your logic here and delete this line
+
+ return
+}
diff --git a/app/users/api/internal/logic/user/updateUserInfoLogic.go b/app/users/api/internal/logic/user/updateUserInfoLogic.go
new file mode 100644
index 0000000..e3d87cc
--- /dev/null
+++ b/app/users/api/internal/logic/user/updateUserInfoLogic.go
@@ -0,0 +1,34 @@
+// Code scaffolded by goctl. Safe to edit.
+// goctl 1.9.2
+
+package user
+
+import (
+ "context"
+
+ "juwan-backend/app/users/api/internal/svc"
+ "juwan-backend/app/users/api/internal/types"
+
+ "github.com/zeromicro/go-zero/core/logx"
+)
+
+type UpdateUserInfoLogic struct {
+ logx.Logger
+ ctx context.Context
+ svcCtx *svc.ServiceContext
+}
+
+// 修改用户信息
+func NewUpdateUserInfoLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UpdateUserInfoLogic {
+ return &UpdateUserInfoLogic{
+ Logger: logx.WithContext(ctx),
+ ctx: ctx,
+ svcCtx: svcCtx,
+ }
+}
+
+func (l *UpdateUserInfoLogic) UpdateUserInfo(req *types.UpdateUserInfoReq) (resp *types.UpdateUserInfoResp, err error) {
+ // todo: add your logic here and delete this line
+
+ return
+}
diff --git a/app/users/api/internal/middleware/loggerMiddleware.go b/app/users/api/internal/middleware/loggerMiddleware.go
new file mode 100644
index 0000000..a7fc87e
--- /dev/null
+++ b/app/users/api/internal/middleware/loggerMiddleware.go
@@ -0,0 +1,22 @@
+// Code scaffolded by goctl. Safe to edit.
+// goctl 1.9.2
+
+package middleware
+
+import "net/http"
+
+type LoggerMiddleware struct {
+}
+
+func NewLoggerMiddleware() *LoggerMiddleware {
+ return &LoggerMiddleware{}
+}
+
+func (m *LoggerMiddleware) Handle(next http.HandlerFunc) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ // TODO generate middleware implement function, delete after code implementation
+
+ // Passthrough to next handler if need
+ next(w, r)
+ }
+}
diff --git a/app/users/api/internal/svc/serviceContext.go b/app/users/api/internal/svc/serviceContext.go
new file mode 100644
index 0000000..03afc77
--- /dev/null
+++ b/app/users/api/internal/svc/serviceContext.go
@@ -0,0 +1,27 @@
+// Code scaffolded by goctl. Safe to edit.
+// goctl 1.9.2
+
+package svc
+
+import (
+ "juwan-backend/app/users/api/internal/config"
+ "juwan-backend/app/users/api/internal/middleware"
+ "juwan-backend/app/users/rpc/usercenter"
+
+ "github.com/zeromicro/go-zero/rest"
+ "github.com/zeromicro/go-zero/zrpc"
+)
+
+type ServiceContext struct {
+ Config config.Config
+ Logger rest.Middleware
+ UserRpc usercenter.Usercenter
+}
+
+func NewServiceContext(c config.Config) *ServiceContext {
+ return &ServiceContext{
+ Config: c,
+ Logger: middleware.NewLoggerMiddleware().Handle,
+ UserRpc: usercenter.NewUsercenter(zrpc.MustNewClient(c.UsercenterRpcConf)),
+ }
+}
diff --git a/app/users/api/internal/types/types.go b/app/users/api/internal/types/types.go
new file mode 100644
index 0000000..cd9bb8b
--- /dev/null
+++ b/app/users/api/internal/types/types.go
@@ -0,0 +1,81 @@
+// Code generated by goctl. DO NOT EDIT.
+// goctl 1.9.2
+
+package types
+
// NOTE(review): the `binding:"..."` tags below are go-playground/validator
// (gin-style) syntax; go-zero's httpx.Parse does not evaluate them on its
// own — confirm a validator hook is wired in, or the constraints are inert.

// ErrorResp is the generic error envelope returned by the API layer.
type ErrorResp struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}

// GetUserInfoReq identifies the user whose profile is requested (path param).
type GetUserInfoReq struct {
	UserId int64 `path:"userId" binding:"required,gt=0"`
}

// LoginReq carries the login credentials.
type LoginReq struct {
	Username string `json:"username" binding:"required"`
	Password string `json:"password" binding:"required"`
}

// LoginResp returns the authenticated identity and its token.
type LoginResp struct {
	UserId   int64  `json:"userId"`
	Username string `json:"username"`
	Email    string `json:"email"`
	Token    string `json:"token"`
	Expires  int64  `json:"expires"`
}

// LogoutReq identifies the user to log out (path param).
type LogoutReq struct {
	UserId int64 `path:"userId" binding:"required,gt=0"`
}

// LogoutResp acknowledges the logout.
type LogoutResp struct {
	Message string `json:"message"`
}

// RegisterReq carries the new-account fields; email and phone are optional.
type RegisterReq struct {
	Username string `json:"username" binding:"required,min=3,max=50"`
	Password string `json:"password" binding:"required,min=6,max=128"`
	Email    string `json:"email,omitempty" binding:"omitempty,email"`
	Phone    string `json:"phone,omitempty" binding:"omitempty,len=11"`
}

// RegisterResp returns the created account's identity.
// NOTE(review): UserId is int64 here while the RPC layer uses UUID strings
// (see AddUsersReq.UserId) — confirm the intended id type.
type RegisterResp struct {
	UserId   int64  `json:"userId"`
	Username string `json:"username"`
	Email    string `json:"email"`
	Message  string `json:"message"`
}

// UpdatePasswordReq carries a password change request for one user.
type UpdatePasswordReq struct {
	UserId      int64  `path:"userId" binding:"required,gt=0"`
	OldPassword string `json:"oldPassword" binding:"required"`
	NewPassword string `json:"newPassword" binding:"required,min=6,max=128"`
}

// UpdatePasswordResp acknowledges the password change.
type UpdatePasswordResp struct {
	Message string `json:"message"`
}

// UpdateUserInfoReq carries optional profile updates for one user.
type UpdateUserInfoReq struct {
	UserId int64  `path:"userId" binding:"required,gt=0"`
	Email  string `json:"email" binding:"omitempty,email"`
	Phone  string `json:"phone" binding:"omitempty,len=11"`
	Avatar string `json:"avatar" binding:"omitempty,url"`
}

// UpdateUserInfoResp acknowledges the profile update.
type UpdateUserInfoResp struct {
	UserId  int64  `json:"userId"`
	Message string `json:"message"`
}

// UserInfo is the public view of a user profile.
type UserInfo struct {
	UserId   int64  `json:"userId"`
	Username string `json:"username"`
	Email    string `json:"email"`
	Phone    string `json:"phone"`
	Avatar   string `json:"avatar"`
	Status   int    `json:"status"`
	CreateAt int64  `json:"createAt"`
	UpdateAt int64  `json:"updateAt"`
}
diff --git a/app/user/api/user.go b/app/users/api/user.go
similarity index 79%
rename from app/user/api/user.go
rename to app/users/api/user.go
index d618a53..b023a60 100644
--- a/app/user/api/user.go
+++ b/app/users/api/user.go
@@ -7,9 +7,9 @@ import (
"flag"
"fmt"
- "juwan-backend/app/user/api/internal/config"
- "juwan-backend/app/user/api/internal/handler"
- "juwan-backend/app/user/api/internal/svc"
+ "juwan-backend/app/users/api/internal/config"
+ "juwan-backend/app/users/api/internal/handler"
+ "juwan-backend/app/users/api/internal/svc"
"github.com/zeromicro/go-zero/core/conf"
"github.com/zeromicro/go-zero/rest"
diff --git a/app/users/rpc/etc/pb.yaml b/app/users/rpc/etc/pb.yaml
new file mode 100644
index 0000000..d1cde6a
--- /dev/null
+++ b/app/users/rpc/etc/pb.yaml
@@ -0,0 +1,15 @@
Name: pb.rpc
ListenOn: 0.0.0.0:9001

# Prometheus metrics endpoint.
# NOTE(review): Port 9001 is the same as the RPC ListenOn port above; the
# metrics HTTP server normally needs its own port — confirm this is intended.
Prometheus:
  Host: 0.0.0.0
  Port: 9001
  Path: /metrics

# Postgres DSN; ${DB_URI} is substituted from the environment (conf.UseEnv()).
DataSource: "${DB_URI}?sslmode=disable"

# go-zero cache node list; credentials come from the environment.
CacheConf:
  - Host: "${REDIS_HOST}"
    Type: cluster
    Pass: "${REDIS_PASSWORD}"
    User: "default"
diff --git a/app/users/rpc/internal/config/config.go b/app/users/rpc/internal/config/config.go
new file mode 100644
index 0000000..4672a02
--- /dev/null
+++ b/app/users/rpc/internal/config/config.go
@@ -0,0 +1,12 @@
+package config
+
+import (
+ "github.com/zeromicro/go-zero/core/stores/cache"
+ "github.com/zeromicro/go-zero/zrpc"
+)
+
// Config is the RPC service configuration loaded from etc/pb.yaml.
type Config struct {
	zrpc.RpcServerConf
	// DataSource is the Postgres DSN (environment-expanded at load time).
	DataSource string `json:"dataSource"`
	// CacheConf lists the cache nodes used by the cached model layer.
	CacheConf cache.CacheConf
}
diff --git a/app/users/rpc/internal/logic/addUsersLogic.go b/app/users/rpc/internal/logic/addUsersLogic.go
new file mode 100644
index 0000000..ad72606
--- /dev/null
+++ b/app/users/rpc/internal/logic/addUsersLogic.go
@@ -0,0 +1,31 @@
+package logic
+
+import (
+ "context"
+
+ "juwan-backend/app/users/rpc/internal/svc"
+ "juwan-backend/app/users/rpc/pb"
+
+ "github.com/zeromicro/go-zero/core/logx"
+)
+
+type AddUsersLogic struct {
+ ctx context.Context
+ svcCtx *svc.ServiceContext
+ logx.Logger
+}
+
+func NewAddUsersLogic(ctx context.Context, svcCtx *svc.ServiceContext) *AddUsersLogic {
+ return &AddUsersLogic{
+ ctx: ctx,
+ svcCtx: svcCtx,
+ Logger: logx.WithContext(ctx),
+ }
+}
+
+// -----------------------users-----------------------
+func (l *AddUsersLogic) AddUsers(in *pb.AddUsersReq) (*pb.AddUsersResp, error) {
+ // todo: add your logic here and delete this line
+
+ return &pb.AddUsersResp{}, nil
+}
diff --git a/app/users/rpc/internal/logic/delUsersLogic.go b/app/users/rpc/internal/logic/delUsersLogic.go
new file mode 100644
index 0000000..62d61e3
--- /dev/null
+++ b/app/users/rpc/internal/logic/delUsersLogic.go
@@ -0,0 +1,30 @@
+package logic
+
+import (
+ "context"
+
+ "juwan-backend/app/users/rpc/internal/svc"
+ "juwan-backend/app/users/rpc/pb"
+
+ "github.com/zeromicro/go-zero/core/logx"
+)
+
+type DelUsersLogic struct {
+ ctx context.Context
+ svcCtx *svc.ServiceContext
+ logx.Logger
+}
+
+func NewDelUsersLogic(ctx context.Context, svcCtx *svc.ServiceContext) *DelUsersLogic {
+ return &DelUsersLogic{
+ ctx: ctx,
+ svcCtx: svcCtx,
+ Logger: logx.WithContext(ctx),
+ }
+}
+
+func (l *DelUsersLogic) DelUsers(in *pb.DelUsersReq) (*pb.DelUsersResp, error) {
+ // todo: add your logic here and delete this line
+
+ return &pb.DelUsersResp{}, nil
+}
diff --git a/app/users/rpc/internal/logic/getUserByUsernameLogic.go b/app/users/rpc/internal/logic/getUserByUsernameLogic.go
new file mode 100644
index 0000000..50ff037
--- /dev/null
+++ b/app/users/rpc/internal/logic/getUserByUsernameLogic.go
@@ -0,0 +1,30 @@
+package logic
+
+import (
+ "context"
+
+ "juwan-backend/app/users/rpc/internal/svc"
+ "juwan-backend/app/users/rpc/pb"
+
+ "github.com/zeromicro/go-zero/core/logx"
+)
+
+type GetUserByUsernameLogic struct {
+ ctx context.Context
+ svcCtx *svc.ServiceContext
+ logx.Logger
+}
+
+func NewGetUserByUsernameLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetUserByUsernameLogic {
+ return &GetUserByUsernameLogic{
+ ctx: ctx,
+ svcCtx: svcCtx,
+ Logger: logx.WithContext(ctx),
+ }
+}
+
+func (l *GetUserByUsernameLogic) GetUserByUsername(in *pb.GetUsersByIdReq) (*pb.GetUsersByIdResp, error) {
+ // todo: add your logic here and delete this line
+
+ return &pb.GetUsersByIdResp{}, nil
+}
diff --git a/app/users/rpc/internal/logic/getUsersByIdLogic.go b/app/users/rpc/internal/logic/getUsersByIdLogic.go
new file mode 100644
index 0000000..969d251
--- /dev/null
+++ b/app/users/rpc/internal/logic/getUsersByIdLogic.go
@@ -0,0 +1,30 @@
+package logic
+
+import (
+ "context"
+
+ "juwan-backend/app/users/rpc/internal/svc"
+ "juwan-backend/app/users/rpc/pb"
+
+ "github.com/zeromicro/go-zero/core/logx"
+)
+
+type GetUsersByIdLogic struct {
+ ctx context.Context
+ svcCtx *svc.ServiceContext
+ logx.Logger
+}
+
+func NewGetUsersByIdLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetUsersByIdLogic {
+ return &GetUsersByIdLogic{
+ ctx: ctx,
+ svcCtx: svcCtx,
+ Logger: logx.WithContext(ctx),
+ }
+}
+
+func (l *GetUsersByIdLogic) GetUsersById(in *pb.GetUsersByIdReq) (*pb.GetUsersByIdResp, error) {
+ // todo: add your logic here and delete this line
+
+ return &pb.GetUsersByIdResp{}, nil
+}
diff --git a/app/users/rpc/internal/logic/searchUsersLogic.go b/app/users/rpc/internal/logic/searchUsersLogic.go
new file mode 100644
index 0000000..59e566e
--- /dev/null
+++ b/app/users/rpc/internal/logic/searchUsersLogic.go
@@ -0,0 +1,30 @@
+package logic
+
+import (
+ "context"
+
+ "juwan-backend/app/users/rpc/internal/svc"
+ "juwan-backend/app/users/rpc/pb"
+
+ "github.com/zeromicro/go-zero/core/logx"
+)
+
+type SearchUsersLogic struct {
+ ctx context.Context
+ svcCtx *svc.ServiceContext
+ logx.Logger
+}
+
+func NewSearchUsersLogic(ctx context.Context, svcCtx *svc.ServiceContext) *SearchUsersLogic {
+ return &SearchUsersLogic{
+ ctx: ctx,
+ svcCtx: svcCtx,
+ Logger: logx.WithContext(ctx),
+ }
+}
+
+func (l *SearchUsersLogic) SearchUsers(in *pb.SearchUsersReq) (*pb.SearchUsersResp, error) {
+ // todo: add your logic here and delete this line
+
+ return &pb.SearchUsersResp{}, nil
+}
diff --git a/app/users/rpc/internal/logic/updateUsersLogic.go b/app/users/rpc/internal/logic/updateUsersLogic.go
new file mode 100644
index 0000000..9ace9aa
--- /dev/null
+++ b/app/users/rpc/internal/logic/updateUsersLogic.go
@@ -0,0 +1,30 @@
+package logic
+
+import (
+ "context"
+
+ "juwan-backend/app/users/rpc/internal/svc"
+ "juwan-backend/app/users/rpc/pb"
+
+ "github.com/zeromicro/go-zero/core/logx"
+)
+
+type UpdateUsersLogic struct {
+ ctx context.Context
+ svcCtx *svc.ServiceContext
+ logx.Logger
+}
+
+func NewUpdateUsersLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UpdateUsersLogic {
+ return &UpdateUsersLogic{
+ ctx: ctx,
+ svcCtx: svcCtx,
+ Logger: logx.WithContext(ctx),
+ }
+}
+
+func (l *UpdateUsersLogic) UpdateUsers(in *pb.UpdateUsersReq) (*pb.UpdateUsersResp, error) {
+ // todo: add your logic here and delete this line
+
+ return &pb.UpdateUsersResp{}, nil
+}
diff --git a/app/users/rpc/internal/models/usersmodel.go b/app/users/rpc/internal/models/usersmodel.go
new file mode 100644
index 0000000..a4aa649
--- /dev/null
+++ b/app/users/rpc/internal/models/usersmodel.go
@@ -0,0 +1,27 @@
+package models
+
+import (
+ "github.com/zeromicro/go-zero/core/stores/cache"
+ "github.com/zeromicro/go-zero/core/stores/sqlx"
+)
+
// Compile-time check that customUsersModel satisfies UsersModel.
var _ UsersModel = (*customUsersModel)(nil)

type (
	// UsersModel is an interface to be customized, add more methods here,
	// and implement the added methods in customUsersModel.
	UsersModel interface {
		usersModel
	}

	// customUsersModel extends the generated default model; hand-written
	// queries go here so regeneration does not overwrite them.
	customUsersModel struct {
		*defaultUsersModel
	}
)

// NewUsersModel returns a model for the database table.
// conn is the SQL connection; c configures the cache nodes used by the
// cached model layer.
func NewUsersModel(conn sqlx.SqlConn, c cache.CacheConf, opts ...cache.Option) UsersModel {
	return &customUsersModel{
		defaultUsersModel: newUsersModel(conn, c, opts...),
	}
}
diff --git a/app/users/rpc/internal/models/usersmodel_gen.go b/app/users/rpc/internal/models/usersmodel_gen.go
new file mode 100644
index 0000000..e941c49
--- /dev/null
+++ b/app/users/rpc/internal/models/usersmodel_gen.go
@@ -0,0 +1,180 @@
+// Code generated by goctl. DO NOT EDIT.
+// versions:
+// goctl version: 1.9.2
+
+package models
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/zeromicro/go-zero/core/stores/builder"
+ "github.com/zeromicro/go-zero/core/stores/cache"
+ "github.com/zeromicro/go-zero/core/stores/sqlc"
+ "github.com/zeromicro/go-zero/core/stores/sqlx"
+ "github.com/zeromicro/go-zero/core/stringx"
+)
+
// Column lists and cache-key prefixes derived from the Users struct tags.
// The timestamp columns are excluded from insert/update sets (DB-managed).
var (
	usersFieldNames          = builder.RawFieldNames(&Users{}, true)
	usersRows                = strings.Join(usersFieldNames, ",")
	usersRowsExpectAutoSet   = strings.Join(stringx.Remove(usersFieldNames, "create_at", "create_time", "created_at", "update_at", "update_time", "updated_at"), ",")
	usersRowsWithPlaceHolder = builder.PostgreSqlJoin(stringx.Remove(usersFieldNames, "user_id", "create_at", "create_time", "created_at", "update_at", "update_time", "updated_at"))

	cachePublicUsersUserIdPrefix   = "cache:public:users:userId:"
	cachePublicUsersPhonePrefix    = "cache:public:users:phone:"
	cachePublicUsersUsernamePrefix = "cache:public:users:username:"
)
+
type (
	// usersModel is the generated CRUD interface embedded by UsersModel.
	usersModel interface {
		Insert(ctx context.Context, data *Users) (sql.Result, error)
		FindOne(ctx context.Context, userId string) (*Users, error)
		FindOneByPhone(ctx context.Context, phone string) (*Users, error)
		FindOneByUsername(ctx context.Context, username string) (*Users, error)
		Update(ctx context.Context, data *Users) error
		Delete(ctx context.Context, userId string) error
	}

	// defaultUsersModel executes queries through a cache-backed connection.
	defaultUsersModel struct {
		sqlc.CachedConn
		table string
	}

	// Users mirrors one row of "public"."users".
	Users struct {
		UserId     string       `db:"user_id"`
		Username   string       `db:"username"`
		Passwd     string       `db:"passwd"`
		Nickname   string       `db:"nickname"`
		Phone      string       `db:"phone"`
		RoleType   int64        `db:"role_type"`
		IsVerified bool         `db:"is_verified"`
		State      bool         `db:"state"`
		CreatedAt  time.Time    `db:"created_at"`
		UpdatedAt  time.Time    `db:"updated_at"`
		DeletedAt  sql.NullTime `db:"deleted_at"`
	}
)
+
// newUsersModel wraps conn in a cached connection bound to "public"."users".
func newUsersModel(conn sqlx.SqlConn, c cache.CacheConf, opts ...cache.Option) *defaultUsersModel {
	return &defaultUsersModel{
		CachedConn: sqlc.NewConn(conn, c, opts...),
		table:      `"public"."users"`,
	}
}
+
// Delete removes the row and invalidates all three cache keys. It reads the
// row first because the phone/username cache keys cannot be derived from the
// primary key alone.
func (m *defaultUsersModel) Delete(ctx context.Context, userId string) error {
	data, err := m.FindOne(ctx, userId)
	if err != nil {
		return err
	}

	publicUsersPhoneKey := fmt.Sprintf("%s%v", cachePublicUsersPhonePrefix, data.Phone)
	publicUsersUserIdKey := fmt.Sprintf("%s%v", cachePublicUsersUserIdPrefix, userId)
	publicUsersUsernameKey := fmt.Sprintf("%s%v", cachePublicUsersUsernamePrefix, data.Username)
	_, err = m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) {
		query := fmt.Sprintf("delete from %s where user_id = $1", m.table)
		return conn.ExecCtx(ctx, query, userId)
	}, publicUsersPhoneKey, publicUsersUserIdKey, publicUsersUsernameKey)
	return err
}
+
// FindOne loads a row by primary key through the userId cache; a cache miss
// falls through to the SQL query and populates the cache.
func (m *defaultUsersModel) FindOne(ctx context.Context, userId string) (*Users, error) {
	publicUsersUserIdKey := fmt.Sprintf("%s%v", cachePublicUsersUserIdPrefix, userId)
	var resp Users
	err := m.QueryRowCtx(ctx, &resp, publicUsersUserIdKey, func(ctx context.Context, conn sqlx.SqlConn, v any) error {
		query := fmt.Sprintf("select %s from %s where user_id = $1 limit 1", usersRows, m.table)
		return conn.QueryRowCtx(ctx, v, query, userId)
	})
	switch err {
	case nil:
		return &resp, nil
	case sqlc.ErrNotFound:
		// Normalize the cache layer's not-found to the package sentinel.
		return nil, ErrNotFound
	default:
		return nil, err
	}
}
+
// FindOneByPhone loads a row via the phone index cache: the phone key maps to
// the primary key, which is then resolved through the primary cache
// (queryPrimary).
func (m *defaultUsersModel) FindOneByPhone(ctx context.Context, phone string) (*Users, error) {
	publicUsersPhoneKey := fmt.Sprintf("%s%v", cachePublicUsersPhonePrefix, phone)
	var resp Users
	err := m.QueryRowIndexCtx(ctx, &resp, publicUsersPhoneKey, m.formatPrimary, func(ctx context.Context, conn sqlx.SqlConn, v any) (i any, e error) {
		query := fmt.Sprintf("select %s from %s where phone = $1 limit 1", usersRows, m.table)
		if err := conn.QueryRowCtx(ctx, &resp, query, phone); err != nil {
			return nil, err
		}
		return resp.UserId, nil
	}, m.queryPrimary)
	switch err {
	case nil:
		return &resp, nil
	case sqlc.ErrNotFound:
		return nil, ErrNotFound
	default:
		return nil, err
	}
}
+
// FindOneByUsername loads a row via the username index cache, mirroring
// FindOneByPhone: index key -> primary key -> row.
func (m *defaultUsersModel) FindOneByUsername(ctx context.Context, username string) (*Users, error) {
	publicUsersUsernameKey := fmt.Sprintf("%s%v", cachePublicUsersUsernamePrefix, username)
	var resp Users
	err := m.QueryRowIndexCtx(ctx, &resp, publicUsersUsernameKey, m.formatPrimary, func(ctx context.Context, conn sqlx.SqlConn, v any) (i any, e error) {
		query := fmt.Sprintf("select %s from %s where username = $1 limit 1", usersRows, m.table)
		if err := conn.QueryRowCtx(ctx, &resp, query, username); err != nil {
			return nil, err
		}
		return resp.UserId, nil
	}, m.queryPrimary)
	switch err {
	case nil:
		return &resp, nil
	case sqlc.ErrNotFound:
		return nil, ErrNotFound
	default:
		return nil, err
	}
}
+
// Insert writes a new row and invalidates the three cache keys. The nine
// placeholders match the nine non-timestamp columns in usersRowsExpectAutoSet
// (user_id, username, passwd, nickname, phone, role_type, is_verified,
// state, deleted_at).
func (m *defaultUsersModel) Insert(ctx context.Context, data *Users) (sql.Result, error) {
	publicUsersPhoneKey := fmt.Sprintf("%s%v", cachePublicUsersPhonePrefix, data.Phone)
	publicUsersUserIdKey := fmt.Sprintf("%s%v", cachePublicUsersUserIdPrefix, data.UserId)
	publicUsersUsernameKey := fmt.Sprintf("%s%v", cachePublicUsersUsernamePrefix, data.Username)
	ret, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) {
		query := fmt.Sprintf("insert into %s (%s) values ($1, $2, $3, $4, $5, $6, $7, $8, $9)", m.table, usersRowsExpectAutoSet)
		return conn.ExecCtx(ctx, query, data.UserId, data.Username, data.Passwd, data.Nickname, data.Phone, data.RoleType, data.IsVerified, data.State, data.DeletedAt)
	}, publicUsersPhoneKey, publicUsersUserIdKey, publicUsersUsernameKey)
	return ret, err
}
+
// Update rewrites the row and invalidates the cache keys of the OLD row
// (fetched via FindOne first), so a changed phone/username also evicts the
// stale index entries.
func (m *defaultUsersModel) Update(ctx context.Context, newData *Users) error {
	data, err := m.FindOne(ctx, newData.UserId)
	if err != nil {
		return err
	}

	publicUsersPhoneKey := fmt.Sprintf("%s%v", cachePublicUsersPhonePrefix, data.Phone)
	publicUsersUserIdKey := fmt.Sprintf("%s%v", cachePublicUsersUserIdPrefix, data.UserId)
	publicUsersUsernameKey := fmt.Sprintf("%s%v", cachePublicUsersUsernamePrefix, data.Username)
	_, err = m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) {
		query := fmt.Sprintf("update %s set %s where user_id = $1", m.table, usersRowsWithPlaceHolder)
		return conn.ExecCtx(ctx, query, newData.UserId, newData.Username, newData.Passwd, newData.Nickname, newData.Phone, newData.RoleType, newData.IsVerified, newData.State, newData.DeletedAt)
	}, publicUsersPhoneKey, publicUsersUserIdKey, publicUsersUsernameKey)
	return err
}
+
// formatPrimary builds the primary cache key for a primary-key value.
func (m *defaultUsersModel) formatPrimary(primary any) string {
	return fmt.Sprintf("%s%v", cachePublicUsersUserIdPrefix, primary)
}

// queryPrimary loads the row for a primary-key value (used by the index
// lookups after resolving the primary key).
func (m *defaultUsersModel) queryPrimary(ctx context.Context, conn sqlx.SqlConn, v, primary any) error {
	query := fmt.Sprintf("select %s from %s where user_id = $1 limit 1", usersRows, m.table)
	return conn.QueryRowCtx(ctx, v, query, primary)
}

// tableName reports the fully qualified table name.
func (m *defaultUsersModel) tableName() string {
	return m.table
}
diff --git a/app/users/rpc/internal/models/vars.go b/app/users/rpc/internal/models/vars.go
new file mode 100644
index 0000000..8d22180
--- /dev/null
+++ b/app/users/rpc/internal/models/vars.go
@@ -0,0 +1,5 @@
+package models
+
+import "github.com/zeromicro/go-zero/core/stores/sqlx"
+
// ErrNotFound is returned by the model layer when no row matches a query.
var ErrNotFound = sqlx.ErrNotFound
diff --git a/app/users/rpc/internal/server/usercenterServer.go b/app/users/rpc/internal/server/usercenterServer.go
new file mode 100644
index 0000000..1504ca8
--- /dev/null
+++ b/app/users/rpc/internal/server/usercenterServer.go
@@ -0,0 +1,55 @@
+// Code generated by goctl. DO NOT EDIT.
+// goctl 1.9.2
+// Source: users.proto
+
+package server
+
+import (
+ "context"
+
+ "juwan-backend/app/users/rpc/internal/logic"
+ "juwan-backend/app/users/rpc/internal/svc"
+ "juwan-backend/app/users/rpc/pb"
+)
+
// UsercenterServer adapts the generated gRPC service interface to the
// per-request logic constructors: each method builds a logic object bound to
// the call's context and delegates to it.
type UsercenterServer struct {
	svcCtx *svc.ServiceContext
	pb.UnimplementedUsercenterServer
}

// NewUsercenterServer wires the server to the shared service context.
func NewUsercenterServer(svcCtx *svc.ServiceContext) *UsercenterServer {
	return &UsercenterServer{
		svcCtx: svcCtx,
	}
}

// -----------------------users-----------------------
func (s *UsercenterServer) AddUsers(ctx context.Context, in *pb.AddUsersReq) (*pb.AddUsersResp, error) {
	l := logic.NewAddUsersLogic(ctx, s.svcCtx)
	return l.AddUsers(in)
}

func (s *UsercenterServer) UpdateUsers(ctx context.Context, in *pb.UpdateUsersReq) (*pb.UpdateUsersResp, error) {
	l := logic.NewUpdateUsersLogic(ctx, s.svcCtx)
	return l.UpdateUsers(in)
}

func (s *UsercenterServer) DelUsers(ctx context.Context, in *pb.DelUsersReq) (*pb.DelUsersResp, error) {
	l := logic.NewDelUsersLogic(ctx, s.svcCtx)
	return l.DelUsers(in)
}

func (s *UsercenterServer) GetUsersById(ctx context.Context, in *pb.GetUsersByIdReq) (*pb.GetUsersByIdResp, error) {
	l := logic.NewGetUsersByIdLogic(ctx, s.svcCtx)
	return l.GetUsersById(in)
}

// NOTE(review): this calls the logic with *pb.GetUserByUsernameReq, but the
// scaffolded logic file declares the method with *pb.GetUsersByIdReq/Resp —
// the logic signature must be aligned or the package will not compile.
func (s *UsercenterServer) GetUserByUsername(ctx context.Context, in *pb.GetUserByUsernameReq) (*pb.GetUserByUsernameResp, error) {
	l := logic.NewGetUserByUsernameLogic(ctx, s.svcCtx)
	return l.GetUserByUsername(in)
}

func (s *UsercenterServer) SearchUsers(ctx context.Context, in *pb.SearchUsersReq) (*pb.SearchUsersResp, error) {
	l := logic.NewSearchUsersLogic(ctx, s.svcCtx)
	return l.SearchUsers(in)
}
diff --git a/app/users/rpc/internal/svc/serviceContext.go b/app/users/rpc/internal/svc/serviceContext.go
new file mode 100644
index 0000000..01ac790
--- /dev/null
+++ b/app/users/rpc/internal/svc/serviceContext.go
@@ -0,0 +1,47 @@
+package svc
+
+import (
+ "context"
+ "juwan-backend/app/users/rpc/internal/config"
+ "juwan-backend/app/users/rpc/internal/models"
+ "time"
+
+ "github.com/redis/go-redis/v9"
+ "github.com/zeromicro/go-zero/core/logx"
+ "github.com/zeromicro/go-zero/core/stores/sqlx"
+)
+
// ServiceContext carries the dependencies shared by all RPC logic:
// the loaded config, the cached users model, and (when cache nodes are
// configured) a direct Redis cluster client.
type ServiceContext struct {
	Config config.Config
	UsersModel models.UsersModel
	// RedisCluster is nil when no CacheConf nodes are configured.
	RedisCluster *redis.ClusterClient
}
+
+func NewServiceContext(c config.Config) *ServiceContext {
+ conn := sqlx.NewSqlConn("postgres", c.DataSource)
+ logx.Infof("success to connect to postgres~")
+
+ // Initialize Redis Cluster client from CacheConf
+ var redisCluster *redis.ClusterClient
+ if len(c.CacheConf) > 0 {
+ redisCluster = redis.NewClusterClient(&redis.ClusterOptions{
+ Addrs: []string{c.CacheConf[0].Host},
+ Password: c.CacheConf[0].Pass,
+ })
+
+ // Test Redis Cluster connection
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ if err := redisCluster.Ping(ctx).Err(); err != nil {
+ logx.Errorf("failed to connect to redis cluster: %v", err)
+ } else {
+ logx.Infof("success to connect to redis cluster~")
+ }
+ }
+
+ return &ServiceContext{
+ Config: c,
+ UsersModel: models.NewUsersModel(conn, c.CacheConf),
+ RedisCluster: redisCluster,
+ }
+}
diff --git a/app/users/rpc/internal/utils/jwt.go b/app/users/rpc/internal/utils/jwt.go
new file mode 100644
index 0000000..8e8ff90
--- /dev/null
+++ b/app/users/rpc/internal/utils/jwt.go
@@ -0,0 +1,30 @@
+package utils
+
+import (
+ "errors"
+ "time"
+)
+
+type TokenPayload struct {
+ UserId string
+ IsAdmin bool
+}
+
+const (
+ tokenCachePrefixUser = "jwt:user:"
+ tokenCachePrefixToken = "jwt:token:"
+ tokenCacheTTL = 60 * 24 * time.Hour
+ tokenLifetime = 5 * 24 * time.Hour
+)
+
+var (
+ errMissingToken = errors.New("token missing in request")
+ errInvalidToken = errors.New("invalid token claims")
+ errTokenNotInCache = errors.New("token not found in cache")
+ errNoRedisClient = errors.New("redis client not configured")
+)
+
+// NewToken is not implemented yet. It fails loudly instead of silently
+// returning an empty string with a nil error, which callers could mistake
+// for a valid token. TODO: implement JWT signing using tokenLifetime.
+func NewToken(payload TokenPayload) (string, error) { return "", errors.New("jwt: NewToken not implemented") }
diff --git a/app/user/rpc/pb.go b/app/users/rpc/pb.go
similarity index 76%
rename from app/user/rpc/pb.go
rename to app/users/rpc/pb.go
index c4bc5eb..c0c69c3 100644
--- a/app/user/rpc/pb.go
+++ b/app/users/rpc/pb.go
@@ -4,10 +4,10 @@ import (
"flag"
"fmt"
- "juwan-backend/app/user/rpc/internal/config"
- "juwan-backend/app/user/rpc/internal/server"
- "juwan-backend/app/user/rpc/internal/svc"
- "juwan-backend/app/user/rpc/pb"
+ "juwan-backend/app/users/rpc/internal/config"
+ "juwan-backend/app/users/rpc/internal/server"
+ "juwan-backend/app/users/rpc/internal/svc"
+ "juwan-backend/app/users/rpc/pb"
"github.com/zeromicro/go-zero/core/conf"
"github.com/zeromicro/go-zero/core/service"
@@ -22,7 +22,7 @@ func main() {
flag.Parse()
var c config.Config
- conf.MustLoad(*configFile, &c)
+ conf.MustLoad(*configFile, &c, conf.UseEnv())
ctx := svc.NewServiceContext(c)
s := zrpc.MustNewServer(c.RpcServerConf, func(grpcServer *grpc.Server) {
diff --git a/app/users/rpc/pb/users.pb.go b/app/users/rpc/pb/users.pb.go
new file mode 100644
index 0000000..17e62cf
--- /dev/null
+++ b/app/users/rpc/pb/users.pb.go
@@ -0,0 +1,1073 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.9
+// protoc v6.32.0
+// source: users.proto
+
+package pb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// --------------------------------users--------------------------------
+type Users struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ UserId string `protobuf:"bytes,1,opt,name=userId,proto3" json:"userId,omitempty"` //userId
+ Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` //username
+ Passwd string `protobuf:"bytes,3,opt,name=passwd,proto3" json:"passwd,omitempty"` //passwd
+ Nickname string `protobuf:"bytes,4,opt,name=nickname,proto3" json:"nickname,omitempty"` //nickname
+ Phone string `protobuf:"bytes,5,opt,name=phone,proto3" json:"phone,omitempty"` //phone
+ RoleType int64 `protobuf:"varint,6,opt,name=roleType,proto3" json:"roleType,omitempty"` //roleType
+ IsVerified bool `protobuf:"varint,7,opt,name=isVerified,proto3" json:"isVerified,omitempty"` //isVerified
+ State bool `protobuf:"varint,8,opt,name=state,proto3" json:"state,omitempty"` //state
+ CreatedAt int64 `protobuf:"varint,9,opt,name=createdAt,proto3" json:"createdAt,omitempty"` //createdAt
+ UpdatedAt int64 `protobuf:"varint,10,opt,name=updatedAt,proto3" json:"updatedAt,omitempty"` //updatedAt
+ DeletedAt int64 `protobuf:"varint,11,opt,name=deletedAt,proto3" json:"deletedAt,omitempty"` //deletedAt
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Users) Reset() {
+ *x = Users{}
+ mi := &file_users_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Users) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Users) ProtoMessage() {}
+
+func (x *Users) ProtoReflect() protoreflect.Message {
+ mi := &file_users_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Users.ProtoReflect.Descriptor instead.
+func (*Users) Descriptor() ([]byte, []int) {
+ return file_users_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Users) GetUserId() string {
+ if x != nil {
+ return x.UserId
+ }
+ return ""
+}
+
+func (x *Users) GetUsername() string {
+ if x != nil {
+ return x.Username
+ }
+ return ""
+}
+
+func (x *Users) GetPasswd() string {
+ if x != nil {
+ return x.Passwd
+ }
+ return ""
+}
+
+func (x *Users) GetNickname() string {
+ if x != nil {
+ return x.Nickname
+ }
+ return ""
+}
+
+func (x *Users) GetPhone() string {
+ if x != nil {
+ return x.Phone
+ }
+ return ""
+}
+
+func (x *Users) GetRoleType() int64 {
+ if x != nil {
+ return x.RoleType
+ }
+ return 0
+}
+
+func (x *Users) GetIsVerified() bool {
+ if x != nil {
+ return x.IsVerified
+ }
+ return false
+}
+
+func (x *Users) GetState() bool {
+ if x != nil {
+ return x.State
+ }
+ return false
+}
+
+func (x *Users) GetCreatedAt() int64 {
+ if x != nil {
+ return x.CreatedAt
+ }
+ return 0
+}
+
+func (x *Users) GetUpdatedAt() int64 {
+ if x != nil {
+ return x.UpdatedAt
+ }
+ return 0
+}
+
+func (x *Users) GetDeletedAt() int64 {
+ if x != nil {
+ return x.DeletedAt
+ }
+ return 0
+}
+
+type AddUsersReq struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ UserId string `protobuf:"bytes,1,opt,name=userId,proto3" json:"userId,omitempty"` //userId
+ Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` //username
+ Passwd string `protobuf:"bytes,3,opt,name=passwd,proto3" json:"passwd,omitempty"` //passwd
+ Nickname string `protobuf:"bytes,4,opt,name=nickname,proto3" json:"nickname,omitempty"` //nickname
+ Phone string `protobuf:"bytes,5,opt,name=phone,proto3" json:"phone,omitempty"` //phone
+ RoleType int64 `protobuf:"varint,6,opt,name=roleType,proto3" json:"roleType,omitempty"` //roleType
+ IsVerified bool `protobuf:"varint,7,opt,name=isVerified,proto3" json:"isVerified,omitempty"` //isVerified
+ State bool `protobuf:"varint,8,opt,name=state,proto3" json:"state,omitempty"` //state
+ CreatedAt int64 `protobuf:"varint,9,opt,name=createdAt,proto3" json:"createdAt,omitempty"` //createdAt
+ UpdatedAt int64 `protobuf:"varint,10,opt,name=updatedAt,proto3" json:"updatedAt,omitempty"` //updatedAt
+ DeletedAt int64 `protobuf:"varint,11,opt,name=deletedAt,proto3" json:"deletedAt,omitempty"` //deletedAt
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *AddUsersReq) Reset() {
+ *x = AddUsersReq{}
+ mi := &file_users_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AddUsersReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddUsersReq) ProtoMessage() {}
+
+func (x *AddUsersReq) ProtoReflect() protoreflect.Message {
+ mi := &file_users_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddUsersReq.ProtoReflect.Descriptor instead.
+func (*AddUsersReq) Descriptor() ([]byte, []int) {
+ return file_users_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *AddUsersReq) GetUserId() string {
+ if x != nil {
+ return x.UserId
+ }
+ return ""
+}
+
+func (x *AddUsersReq) GetUsername() string {
+ if x != nil {
+ return x.Username
+ }
+ return ""
+}
+
+func (x *AddUsersReq) GetPasswd() string {
+ if x != nil {
+ return x.Passwd
+ }
+ return ""
+}
+
+func (x *AddUsersReq) GetNickname() string {
+ if x != nil {
+ return x.Nickname
+ }
+ return ""
+}
+
+func (x *AddUsersReq) GetPhone() string {
+ if x != nil {
+ return x.Phone
+ }
+ return ""
+}
+
+func (x *AddUsersReq) GetRoleType() int64 {
+ if x != nil {
+ return x.RoleType
+ }
+ return 0
+}
+
+func (x *AddUsersReq) GetIsVerified() bool {
+ if x != nil {
+ return x.IsVerified
+ }
+ return false
+}
+
+func (x *AddUsersReq) GetState() bool {
+ if x != nil {
+ return x.State
+ }
+ return false
+}
+
+func (x *AddUsersReq) GetCreatedAt() int64 {
+ if x != nil {
+ return x.CreatedAt
+ }
+ return 0
+}
+
+func (x *AddUsersReq) GetUpdatedAt() int64 {
+ if x != nil {
+ return x.UpdatedAt
+ }
+ return 0
+}
+
+func (x *AddUsersReq) GetDeletedAt() int64 {
+ if x != nil {
+ return x.DeletedAt
+ }
+ return 0
+}
+
+type AddUsersResp struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *AddUsersResp) Reset() {
+ *x = AddUsersResp{}
+ mi := &file_users_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AddUsersResp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddUsersResp) ProtoMessage() {}
+
+func (x *AddUsersResp) ProtoReflect() protoreflect.Message {
+ mi := &file_users_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddUsersResp.ProtoReflect.Descriptor instead.
+func (*AddUsersResp) Descriptor() ([]byte, []int) {
+ return file_users_proto_rawDescGZIP(), []int{2}
+}
+
+type UpdateUsersReq struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ UserId string `protobuf:"bytes,1,opt,name=userId,proto3" json:"userId,omitempty"` //userId
+ Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` //username
+ Passwd string `protobuf:"bytes,3,opt,name=passwd,proto3" json:"passwd,omitempty"` //passwd
+ Nickname string `protobuf:"bytes,4,opt,name=nickname,proto3" json:"nickname,omitempty"` //nickname
+ Phone string `protobuf:"bytes,5,opt,name=phone,proto3" json:"phone,omitempty"` //phone
+ RoleType int64 `protobuf:"varint,6,opt,name=roleType,proto3" json:"roleType,omitempty"` //roleType
+ IsVerified bool `protobuf:"varint,7,opt,name=isVerified,proto3" json:"isVerified,omitempty"` //isVerified
+ State bool `protobuf:"varint,8,opt,name=state,proto3" json:"state,omitempty"` //state
+ CreatedAt int64 `protobuf:"varint,9,opt,name=createdAt,proto3" json:"createdAt,omitempty"` //createdAt
+ UpdatedAt int64 `protobuf:"varint,10,opt,name=updatedAt,proto3" json:"updatedAt,omitempty"` //updatedAt
+ DeletedAt int64 `protobuf:"varint,11,opt,name=deletedAt,proto3" json:"deletedAt,omitempty"` //deletedAt
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *UpdateUsersReq) Reset() {
+ *x = UpdateUsersReq{}
+ mi := &file_users_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateUsersReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateUsersReq) ProtoMessage() {}
+
+func (x *UpdateUsersReq) ProtoReflect() protoreflect.Message {
+ mi := &file_users_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateUsersReq.ProtoReflect.Descriptor instead.
+func (*UpdateUsersReq) Descriptor() ([]byte, []int) {
+ return file_users_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *UpdateUsersReq) GetUserId() string {
+ if x != nil {
+ return x.UserId
+ }
+ return ""
+}
+
+func (x *UpdateUsersReq) GetUsername() string {
+ if x != nil {
+ return x.Username
+ }
+ return ""
+}
+
+func (x *UpdateUsersReq) GetPasswd() string {
+ if x != nil {
+ return x.Passwd
+ }
+ return ""
+}
+
+func (x *UpdateUsersReq) GetNickname() string {
+ if x != nil {
+ return x.Nickname
+ }
+ return ""
+}
+
+func (x *UpdateUsersReq) GetPhone() string {
+ if x != nil {
+ return x.Phone
+ }
+ return ""
+}
+
+func (x *UpdateUsersReq) GetRoleType() int64 {
+ if x != nil {
+ return x.RoleType
+ }
+ return 0
+}
+
+func (x *UpdateUsersReq) GetIsVerified() bool {
+ if x != nil {
+ return x.IsVerified
+ }
+ return false
+}
+
+func (x *UpdateUsersReq) GetState() bool {
+ if x != nil {
+ return x.State
+ }
+ return false
+}
+
+func (x *UpdateUsersReq) GetCreatedAt() int64 {
+ if x != nil {
+ return x.CreatedAt
+ }
+ return 0
+}
+
+func (x *UpdateUsersReq) GetUpdatedAt() int64 {
+ if x != nil {
+ return x.UpdatedAt
+ }
+ return 0
+}
+
+func (x *UpdateUsersReq) GetDeletedAt() int64 {
+ if x != nil {
+ return x.DeletedAt
+ }
+ return 0
+}
+
+type UpdateUsersResp struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *UpdateUsersResp) Reset() {
+ *x = UpdateUsersResp{}
+ mi := &file_users_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateUsersResp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateUsersResp) ProtoMessage() {}
+
+func (x *UpdateUsersResp) ProtoReflect() protoreflect.Message {
+ mi := &file_users_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateUsersResp.ProtoReflect.Descriptor instead.
+func (*UpdateUsersResp) Descriptor() ([]byte, []int) {
+ return file_users_proto_rawDescGZIP(), []int{4}
+}
+
+type DelUsersReq struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` //id
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *DelUsersReq) Reset() {
+ *x = DelUsersReq{}
+ mi := &file_users_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DelUsersReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DelUsersReq) ProtoMessage() {}
+
+func (x *DelUsersReq) ProtoReflect() protoreflect.Message {
+ mi := &file_users_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DelUsersReq.ProtoReflect.Descriptor instead.
+func (*DelUsersReq) Descriptor() ([]byte, []int) {
+ return file_users_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *DelUsersReq) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+type DelUsersResp struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *DelUsersResp) Reset() {
+ *x = DelUsersResp{}
+ mi := &file_users_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DelUsersResp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DelUsersResp) ProtoMessage() {}
+
+func (x *DelUsersResp) ProtoReflect() protoreflect.Message {
+ mi := &file_users_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DelUsersResp.ProtoReflect.Descriptor instead.
+func (*DelUsersResp) Descriptor() ([]byte, []int) {
+ return file_users_proto_rawDescGZIP(), []int{6}
+}
+
+type GetUsersByIdReq struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` //id
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *GetUsersByIdReq) Reset() {
+ *x = GetUsersByIdReq{}
+ mi := &file_users_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetUsersByIdReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetUsersByIdReq) ProtoMessage() {}
+
+func (x *GetUsersByIdReq) ProtoReflect() protoreflect.Message {
+ mi := &file_users_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetUsersByIdReq.ProtoReflect.Descriptor instead.
+func (*GetUsersByIdReq) Descriptor() ([]byte, []int) {
+ return file_users_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *GetUsersByIdReq) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+type GetUsersByIdResp struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Users *Users `protobuf:"bytes,1,opt,name=users,proto3" json:"users,omitempty"` //users
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *GetUsersByIdResp) Reset() {
+ *x = GetUsersByIdResp{}
+ mi := &file_users_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetUsersByIdResp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetUsersByIdResp) ProtoMessage() {}
+
+func (x *GetUsersByIdResp) ProtoReflect() protoreflect.Message {
+ mi := &file_users_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetUsersByIdResp.ProtoReflect.Descriptor instead.
+func (*GetUsersByIdResp) Descriptor() ([]byte, []int) {
+ return file_users_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *GetUsersByIdResp) GetUsers() *Users {
+ if x != nil {
+ return x.Users
+ }
+ return nil
+}
+
+type SearchUsersReq struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Page int64 `protobuf:"varint,1,opt,name=page,proto3" json:"page,omitempty"` //page
+ Limit int64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` //limit
+ UserId string `protobuf:"bytes,3,opt,name=userId,proto3" json:"userId,omitempty"` //userId
+ Username string `protobuf:"bytes,4,opt,name=username,proto3" json:"username,omitempty"` //username
+ Passwd string `protobuf:"bytes,5,opt,name=passwd,proto3" json:"passwd,omitempty"` //passwd
+ Nickname string `protobuf:"bytes,6,opt,name=nickname,proto3" json:"nickname,omitempty"` //nickname
+ Phone string `protobuf:"bytes,7,opt,name=phone,proto3" json:"phone,omitempty"` //phone
+ RoleType int64 `protobuf:"varint,8,opt,name=roleType,proto3" json:"roleType,omitempty"` //roleType
+ IsVerified bool `protobuf:"varint,9,opt,name=isVerified,proto3" json:"isVerified,omitempty"` //isVerified
+ State bool `protobuf:"varint,10,opt,name=state,proto3" json:"state,omitempty"` //state
+ CreatedAt int64 `protobuf:"varint,11,opt,name=createdAt,proto3" json:"createdAt,omitempty"` //createdAt
+ UpdatedAt int64 `protobuf:"varint,12,opt,name=updatedAt,proto3" json:"updatedAt,omitempty"` //updatedAt
+ DeletedAt int64 `protobuf:"varint,13,opt,name=deletedAt,proto3" json:"deletedAt,omitempty"` //deletedAt
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SearchUsersReq) Reset() {
+ *x = SearchUsersReq{}
+ mi := &file_users_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SearchUsersReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SearchUsersReq) ProtoMessage() {}
+
+func (x *SearchUsersReq) ProtoReflect() protoreflect.Message {
+ mi := &file_users_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SearchUsersReq.ProtoReflect.Descriptor instead.
+func (*SearchUsersReq) Descriptor() ([]byte, []int) {
+ return file_users_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *SearchUsersReq) GetPage() int64 {
+ if x != nil {
+ return x.Page
+ }
+ return 0
+}
+
+func (x *SearchUsersReq) GetLimit() int64 {
+ if x != nil {
+ return x.Limit
+ }
+ return 0
+}
+
+func (x *SearchUsersReq) GetUserId() string {
+ if x != nil {
+ return x.UserId
+ }
+ return ""
+}
+
+func (x *SearchUsersReq) GetUsername() string {
+ if x != nil {
+ return x.Username
+ }
+ return ""
+}
+
+func (x *SearchUsersReq) GetPasswd() string {
+ if x != nil {
+ return x.Passwd
+ }
+ return ""
+}
+
+func (x *SearchUsersReq) GetNickname() string {
+ if x != nil {
+ return x.Nickname
+ }
+ return ""
+}
+
+func (x *SearchUsersReq) GetPhone() string {
+ if x != nil {
+ return x.Phone
+ }
+ return ""
+}
+
+func (x *SearchUsersReq) GetRoleType() int64 {
+ if x != nil {
+ return x.RoleType
+ }
+ return 0
+}
+
+func (x *SearchUsersReq) GetIsVerified() bool {
+ if x != nil {
+ return x.IsVerified
+ }
+ return false
+}
+
+func (x *SearchUsersReq) GetState() bool {
+ if x != nil {
+ return x.State
+ }
+ return false
+}
+
+func (x *SearchUsersReq) GetCreatedAt() int64 {
+ if x != nil {
+ return x.CreatedAt
+ }
+ return 0
+}
+
+func (x *SearchUsersReq) GetUpdatedAt() int64 {
+ if x != nil {
+ return x.UpdatedAt
+ }
+ return 0
+}
+
+func (x *SearchUsersReq) GetDeletedAt() int64 {
+ if x != nil {
+ return x.DeletedAt
+ }
+ return 0
+}
+
+type SearchUsersResp struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Users []*Users `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"` //users
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SearchUsersResp) Reset() {
+ *x = SearchUsersResp{}
+ mi := &file_users_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SearchUsersResp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SearchUsersResp) ProtoMessage() {}
+
+func (x *SearchUsersResp) ProtoReflect() protoreflect.Message {
+ mi := &file_users_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SearchUsersResp.ProtoReflect.Descriptor instead.
+func (*SearchUsersResp) Descriptor() ([]byte, []int) {
+ return file_users_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *SearchUsersResp) GetUsers() []*Users {
+ if x != nil {
+ return x.Users
+ }
+ return nil
+}
+
+type GetUserByUsernameReq struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` //username
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *GetUserByUsernameReq) Reset() {
+ *x = GetUserByUsernameReq{}
+ mi := &file_users_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetUserByUsernameReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetUserByUsernameReq) ProtoMessage() {}
+
+func (x *GetUserByUsernameReq) ProtoReflect() protoreflect.Message {
+ mi := &file_users_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetUserByUsernameReq.ProtoReflect.Descriptor instead.
+func (*GetUserByUsernameReq) Descriptor() ([]byte, []int) {
+ return file_users_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *GetUserByUsernameReq) GetUsername() string {
+ if x != nil {
+ return x.Username
+ }
+ return ""
+}
+
+type GetUserByUsernameResp struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Users *Users `protobuf:"bytes,1,opt,name=users,proto3" json:"users,omitempty"` //users
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *GetUserByUsernameResp) Reset() {
+ *x = GetUserByUsernameResp{}
+ mi := &file_users_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetUserByUsernameResp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetUserByUsernameResp) ProtoMessage() {}
+
+func (x *GetUserByUsernameResp) ProtoReflect() protoreflect.Message {
+ mi := &file_users_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetUserByUsernameResp.ProtoReflect.Descriptor instead.
+func (*GetUserByUsernameResp) Descriptor() ([]byte, []int) {
+ return file_users_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *GetUserByUsernameResp) GetUsers() *Users {
+ if x != nil {
+ return x.Users
+ }
+ return nil
+}
+
+var File_users_proto protoreflect.FileDescriptor
+
+const file_users_proto_rawDesc = "" +
+ "\n" +
+ "\vusers.proto\x12\x02pb\"\xb1\x02\n" +
+ "\x05Users\x12\x16\n" +
+ "\x06userId\x18\x01 \x01(\tR\x06userId\x12\x1a\n" +
+ "\busername\x18\x02 \x01(\tR\busername\x12\x16\n" +
+ "\x06passwd\x18\x03 \x01(\tR\x06passwd\x12\x1a\n" +
+ "\bnickname\x18\x04 \x01(\tR\bnickname\x12\x14\n" +
+ "\x05phone\x18\x05 \x01(\tR\x05phone\x12\x1a\n" +
+ "\broleType\x18\x06 \x01(\x03R\broleType\x12\x1e\n" +
+ "\n" +
+ "isVerified\x18\a \x01(\bR\n" +
+ "isVerified\x12\x14\n" +
+ "\x05state\x18\b \x01(\bR\x05state\x12\x1c\n" +
+ "\tcreatedAt\x18\t \x01(\x03R\tcreatedAt\x12\x1c\n" +
+ "\tupdatedAt\x18\n" +
+ " \x01(\x03R\tupdatedAt\x12\x1c\n" +
+ "\tdeletedAt\x18\v \x01(\x03R\tdeletedAt\"\xb7\x02\n" +
+ "\vAddUsersReq\x12\x16\n" +
+ "\x06userId\x18\x01 \x01(\tR\x06userId\x12\x1a\n" +
+ "\busername\x18\x02 \x01(\tR\busername\x12\x16\n" +
+ "\x06passwd\x18\x03 \x01(\tR\x06passwd\x12\x1a\n" +
+ "\bnickname\x18\x04 \x01(\tR\bnickname\x12\x14\n" +
+ "\x05phone\x18\x05 \x01(\tR\x05phone\x12\x1a\n" +
+ "\broleType\x18\x06 \x01(\x03R\broleType\x12\x1e\n" +
+ "\n" +
+ "isVerified\x18\a \x01(\bR\n" +
+ "isVerified\x12\x14\n" +
+ "\x05state\x18\b \x01(\bR\x05state\x12\x1c\n" +
+ "\tcreatedAt\x18\t \x01(\x03R\tcreatedAt\x12\x1c\n" +
+ "\tupdatedAt\x18\n" +
+ " \x01(\x03R\tupdatedAt\x12\x1c\n" +
+ "\tdeletedAt\x18\v \x01(\x03R\tdeletedAt\"\x0e\n" +
+ "\fAddUsersResp\"\xba\x02\n" +
+ "\x0eUpdateUsersReq\x12\x16\n" +
+ "\x06userId\x18\x01 \x01(\tR\x06userId\x12\x1a\n" +
+ "\busername\x18\x02 \x01(\tR\busername\x12\x16\n" +
+ "\x06passwd\x18\x03 \x01(\tR\x06passwd\x12\x1a\n" +
+ "\bnickname\x18\x04 \x01(\tR\bnickname\x12\x14\n" +
+ "\x05phone\x18\x05 \x01(\tR\x05phone\x12\x1a\n" +
+ "\broleType\x18\x06 \x01(\x03R\broleType\x12\x1e\n" +
+ "\n" +
+ "isVerified\x18\a \x01(\bR\n" +
+ "isVerified\x12\x14\n" +
+ "\x05state\x18\b \x01(\bR\x05state\x12\x1c\n" +
+ "\tcreatedAt\x18\t \x01(\x03R\tcreatedAt\x12\x1c\n" +
+ "\tupdatedAt\x18\n" +
+ " \x01(\x03R\tupdatedAt\x12\x1c\n" +
+ "\tdeletedAt\x18\v \x01(\x03R\tdeletedAt\"\x11\n" +
+ "\x0fUpdateUsersResp\"\x1d\n" +
+ "\vDelUsersReq\x12\x0e\n" +
+ "\x02id\x18\x01 \x01(\x03R\x02id\"\x0e\n" +
+ "\fDelUsersResp\"!\n" +
+ "\x0fGetUsersByIdReq\x12\x0e\n" +
+ "\x02id\x18\x01 \x01(\x03R\x02id\"3\n" +
+ "\x10GetUsersByIdResp\x12\x1f\n" +
+ "\x05users\x18\x01 \x01(\v2\t.pb.UsersR\x05users\"\xe4\x02\n" +
+ "\x0eSearchUsersReq\x12\x12\n" +
+ "\x04page\x18\x01 \x01(\x03R\x04page\x12\x14\n" +
+ "\x05limit\x18\x02 \x01(\x03R\x05limit\x12\x16\n" +
+ "\x06userId\x18\x03 \x01(\tR\x06userId\x12\x1a\n" +
+ "\busername\x18\x04 \x01(\tR\busername\x12\x16\n" +
+ "\x06passwd\x18\x05 \x01(\tR\x06passwd\x12\x1a\n" +
+ "\bnickname\x18\x06 \x01(\tR\bnickname\x12\x14\n" +
+ "\x05phone\x18\a \x01(\tR\x05phone\x12\x1a\n" +
+ "\broleType\x18\b \x01(\x03R\broleType\x12\x1e\n" +
+ "\n" +
+ "isVerified\x18\t \x01(\bR\n" +
+ "isVerified\x12\x14\n" +
+ "\x05state\x18\n" +
+ " \x01(\bR\x05state\x12\x1c\n" +
+ "\tcreatedAt\x18\v \x01(\x03R\tcreatedAt\x12\x1c\n" +
+ "\tupdatedAt\x18\f \x01(\x03R\tupdatedAt\x12\x1c\n" +
+ "\tdeletedAt\x18\r \x01(\x03R\tdeletedAt\"2\n" +
+ "\x0fSearchUsersResp\x12\x1f\n" +
+ "\x05users\x18\x01 \x03(\v2\t.pb.UsersR\x05users\"2\n" +
+ "\x14GetUserByUsernameReq\x12\x1a\n" +
+ "\busername\x18\x01 \x01(\tR\busername\"8\n" +
+ "\x15GetUserByUsernameResp\x12\x1f\n" +
+ "\x05users\x18\x01 \x01(\v2\t.pb.UsersR\x05users2\xdf\x02\n" +
+ "\n" +
+ "usercenter\x12-\n" +
+ "\bAddUsers\x12\x0f.pb.AddUsersReq\x1a\x10.pb.AddUsersResp\x126\n" +
+ "\vUpdateUsers\x12\x12.pb.UpdateUsersReq\x1a\x13.pb.UpdateUsersResp\x12-\n" +
+ "\bDelUsers\x12\x0f.pb.DelUsersReq\x1a\x10.pb.DelUsersResp\x129\n" +
+ "\fGetUsersById\x12\x13.pb.GetUsersByIdReq\x1a\x14.pb.GetUsersByIdResp\x12H\n" +
+ "\x11GetUserByUsername\x12\x18.pb.GetUserByUsernameReq\x1a\x19.pb.GetUserByUsernameResp\x126\n" +
+ "\vSearchUsers\x12\x12.pb.SearchUsersReq\x1a\x13.pb.SearchUsersRespB\x06Z\x04./pbb\x06proto3"
+
+var (
+ file_users_proto_rawDescOnce sync.Once
+ file_users_proto_rawDescData []byte
+)
+
+func file_users_proto_rawDescGZIP() []byte {
+ file_users_proto_rawDescOnce.Do(func() {
+ file_users_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_users_proto_rawDesc), len(file_users_proto_rawDesc)))
+ })
+ return file_users_proto_rawDescData
+}
+
+var file_users_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
+var file_users_proto_goTypes = []any{
+ (*Users)(nil), // 0: pb.Users
+ (*AddUsersReq)(nil), // 1: pb.AddUsersReq
+ (*AddUsersResp)(nil), // 2: pb.AddUsersResp
+ (*UpdateUsersReq)(nil), // 3: pb.UpdateUsersReq
+ (*UpdateUsersResp)(nil), // 4: pb.UpdateUsersResp
+ (*DelUsersReq)(nil), // 5: pb.DelUsersReq
+ (*DelUsersResp)(nil), // 6: pb.DelUsersResp
+ (*GetUsersByIdReq)(nil), // 7: pb.GetUsersByIdReq
+ (*GetUsersByIdResp)(nil), // 8: pb.GetUsersByIdResp
+ (*SearchUsersReq)(nil), // 9: pb.SearchUsersReq
+ (*SearchUsersResp)(nil), // 10: pb.SearchUsersResp
+ (*GetUserByUsernameReq)(nil), // 11: pb.GetUserByUsernameReq
+ (*GetUserByUsernameResp)(nil), // 12: pb.GetUserByUsernameResp
+}
+var file_users_proto_depIdxs = []int32{
+ 0, // 0: pb.GetUsersByIdResp.users:type_name -> pb.Users
+ 0, // 1: pb.SearchUsersResp.users:type_name -> pb.Users
+ 0, // 2: pb.GetUserByUsernameResp.users:type_name -> pb.Users
+ 1, // 3: pb.usercenter.AddUsers:input_type -> pb.AddUsersReq
+ 3, // 4: pb.usercenter.UpdateUsers:input_type -> pb.UpdateUsersReq
+ 5, // 5: pb.usercenter.DelUsers:input_type -> pb.DelUsersReq
+ 7, // 6: pb.usercenter.GetUsersById:input_type -> pb.GetUsersByIdReq
+ 11, // 7: pb.usercenter.GetUserByUsername:input_type -> pb.GetUserByUsernameReq
+ 9, // 8: pb.usercenter.SearchUsers:input_type -> pb.SearchUsersReq
+ 2, // 9: pb.usercenter.AddUsers:output_type -> pb.AddUsersResp
+ 4, // 10: pb.usercenter.UpdateUsers:output_type -> pb.UpdateUsersResp
+ 6, // 11: pb.usercenter.DelUsers:output_type -> pb.DelUsersResp
+ 8, // 12: pb.usercenter.GetUsersById:output_type -> pb.GetUsersByIdResp
+ 12, // 13: pb.usercenter.GetUserByUsername:output_type -> pb.GetUserByUsernameResp
+ 10, // 14: pb.usercenter.SearchUsers:output_type -> pb.SearchUsersResp
+ 9, // [9:15] is the sub-list for method output_type
+ 3, // [3:9] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_users_proto_init() }
+func file_users_proto_init() {
+ if File_users_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_users_proto_rawDesc), len(file_users_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 13,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_users_proto_goTypes,
+ DependencyIndexes: file_users_proto_depIdxs,
+ MessageInfos: file_users_proto_msgTypes,
+ }.Build()
+ File_users_proto = out.File
+ file_users_proto_goTypes = nil
+ file_users_proto_depIdxs = nil
+}
diff --git a/app/users/rpc/pb/users_grpc.pb.go b/app/users/rpc/pb/users_grpc.pb.go
new file mode 100644
index 0000000..389e6cb
--- /dev/null
+++ b/app/users/rpc/pb/users_grpc.pb.go
@@ -0,0 +1,313 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v6.32.0
+// source: users.proto
+
+package pb
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+ Usercenter_AddUsers_FullMethodName = "/pb.usercenter/AddUsers"
+ Usercenter_UpdateUsers_FullMethodName = "/pb.usercenter/UpdateUsers"
+ Usercenter_DelUsers_FullMethodName = "/pb.usercenter/DelUsers"
+ Usercenter_GetUsersById_FullMethodName = "/pb.usercenter/GetUsersById"
+ Usercenter_GetUserByUsername_FullMethodName = "/pb.usercenter/GetUserByUsername"
+ Usercenter_SearchUsers_FullMethodName = "/pb.usercenter/SearchUsers"
+)
+
+// UsercenterClient is the client API for Usercenter service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type UsercenterClient interface {
+ // -----------------------users-----------------------
+ AddUsers(ctx context.Context, in *AddUsersReq, opts ...grpc.CallOption) (*AddUsersResp, error)
+ UpdateUsers(ctx context.Context, in *UpdateUsersReq, opts ...grpc.CallOption) (*UpdateUsersResp, error)
+ DelUsers(ctx context.Context, in *DelUsersReq, opts ...grpc.CallOption) (*DelUsersResp, error)
+ GetUsersById(ctx context.Context, in *GetUsersByIdReq, opts ...grpc.CallOption) (*GetUsersByIdResp, error)
+ GetUserByUsername(ctx context.Context, in *GetUserByUsernameReq, opts ...grpc.CallOption) (*GetUserByUsernameResp, error)
+ SearchUsers(ctx context.Context, in *SearchUsersReq, opts ...grpc.CallOption) (*SearchUsersResp, error)
+}
+
+type usercenterClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewUsercenterClient(cc grpc.ClientConnInterface) UsercenterClient {
+ return &usercenterClient{cc}
+}
+
+func (c *usercenterClient) AddUsers(ctx context.Context, in *AddUsersReq, opts ...grpc.CallOption) (*AddUsersResp, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(AddUsersResp)
+ err := c.cc.Invoke(ctx, Usercenter_AddUsers_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *usercenterClient) UpdateUsers(ctx context.Context, in *UpdateUsersReq, opts ...grpc.CallOption) (*UpdateUsersResp, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(UpdateUsersResp)
+ err := c.cc.Invoke(ctx, Usercenter_UpdateUsers_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *usercenterClient) DelUsers(ctx context.Context, in *DelUsersReq, opts ...grpc.CallOption) (*DelUsersResp, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(DelUsersResp)
+ err := c.cc.Invoke(ctx, Usercenter_DelUsers_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *usercenterClient) GetUsersById(ctx context.Context, in *GetUsersByIdReq, opts ...grpc.CallOption) (*GetUsersByIdResp, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(GetUsersByIdResp)
+ err := c.cc.Invoke(ctx, Usercenter_GetUsersById_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *usercenterClient) GetUserByUsername(ctx context.Context, in *GetUserByUsernameReq, opts ...grpc.CallOption) (*GetUserByUsernameResp, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(GetUserByUsernameResp)
+ err := c.cc.Invoke(ctx, Usercenter_GetUserByUsername_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *usercenterClient) SearchUsers(ctx context.Context, in *SearchUsersReq, opts ...grpc.CallOption) (*SearchUsersResp, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(SearchUsersResp)
+ err := c.cc.Invoke(ctx, Usercenter_SearchUsers_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// UsercenterServer is the server API for Usercenter service.
+// All implementations must embed UnimplementedUsercenterServer
+// for forward compatibility.
+type UsercenterServer interface {
+ // -----------------------users-----------------------
+ AddUsers(context.Context, *AddUsersReq) (*AddUsersResp, error)
+ UpdateUsers(context.Context, *UpdateUsersReq) (*UpdateUsersResp, error)
+ DelUsers(context.Context, *DelUsersReq) (*DelUsersResp, error)
+ GetUsersById(context.Context, *GetUsersByIdReq) (*GetUsersByIdResp, error)
+ GetUserByUsername(context.Context, *GetUserByUsernameReq) (*GetUserByUsernameResp, error)
+ SearchUsers(context.Context, *SearchUsersReq) (*SearchUsersResp, error)
+ mustEmbedUnimplementedUsercenterServer()
+}
+
+// UnimplementedUsercenterServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedUsercenterServer struct{}
+
+func (UnimplementedUsercenterServer) AddUsers(context.Context, *AddUsersReq) (*AddUsersResp, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AddUsers not implemented")
+}
+func (UnimplementedUsercenterServer) UpdateUsers(context.Context, *UpdateUsersReq) (*UpdateUsersResp, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateUsers not implemented")
+}
+func (UnimplementedUsercenterServer) DelUsers(context.Context, *DelUsersReq) (*DelUsersResp, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DelUsers not implemented")
+}
+func (UnimplementedUsercenterServer) GetUsersById(context.Context, *GetUsersByIdReq) (*GetUsersByIdResp, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetUsersById not implemented")
+}
+func (UnimplementedUsercenterServer) GetUserByUsername(context.Context, *GetUserByUsernameReq) (*GetUserByUsernameResp, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetUserByUsername not implemented")
+}
+func (UnimplementedUsercenterServer) SearchUsers(context.Context, *SearchUsersReq) (*SearchUsersResp, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SearchUsers not implemented")
+}
+func (UnimplementedUsercenterServer) mustEmbedUnimplementedUsercenterServer() {}
+func (UnimplementedUsercenterServer) testEmbeddedByValue() {}
+
+// UnsafeUsercenterServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to UsercenterServer will
+// result in compilation errors.
+type UnsafeUsercenterServer interface {
+ mustEmbedUnimplementedUsercenterServer()
+}
+
+func RegisterUsercenterServer(s grpc.ServiceRegistrar, srv UsercenterServer) {
+ // If the following call panics, it indicates UnimplementedUsercenterServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
+ s.RegisterService(&Usercenter_ServiceDesc, srv)
+}
+
+func _Usercenter_AddUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AddUsersReq)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UsercenterServer).AddUsers(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Usercenter_AddUsers_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UsercenterServer).AddUsers(ctx, req.(*AddUsersReq))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Usercenter_UpdateUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateUsersReq)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UsercenterServer).UpdateUsers(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Usercenter_UpdateUsers_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UsercenterServer).UpdateUsers(ctx, req.(*UpdateUsersReq))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Usercenter_DelUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DelUsersReq)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UsercenterServer).DelUsers(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Usercenter_DelUsers_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UsercenterServer).DelUsers(ctx, req.(*DelUsersReq))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Usercenter_GetUsersById_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetUsersByIdReq)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UsercenterServer).GetUsersById(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Usercenter_GetUsersById_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UsercenterServer).GetUsersById(ctx, req.(*GetUsersByIdReq))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Usercenter_GetUserByUsername_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetUserByUsernameReq)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UsercenterServer).GetUserByUsername(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Usercenter_GetUserByUsername_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UsercenterServer).GetUserByUsername(ctx, req.(*GetUserByUsernameReq))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Usercenter_SearchUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SearchUsersReq)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UsercenterServer).SearchUsers(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Usercenter_SearchUsers_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UsercenterServer).SearchUsers(ctx, req.(*SearchUsersReq))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// Usercenter_ServiceDesc is the grpc.ServiceDesc for Usercenter service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Usercenter_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "pb.usercenter",
+ HandlerType: (*UsercenterServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "AddUsers",
+ Handler: _Usercenter_AddUsers_Handler,
+ },
+ {
+ MethodName: "UpdateUsers",
+ Handler: _Usercenter_UpdateUsers_Handler,
+ },
+ {
+ MethodName: "DelUsers",
+ Handler: _Usercenter_DelUsers_Handler,
+ },
+ {
+ MethodName: "GetUsersById",
+ Handler: _Usercenter_GetUsersById_Handler,
+ },
+ {
+ MethodName: "GetUserByUsername",
+ Handler: _Usercenter_GetUserByUsername_Handler,
+ },
+ {
+ MethodName: "SearchUsers",
+ Handler: _Usercenter_SearchUsers_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "users.proto",
+}
diff --git a/app/users/rpc/usercenter/usercenter.go b/app/users/rpc/usercenter/usercenter.go
new file mode 100644
index 0000000..f633e14
--- /dev/null
+++ b/app/users/rpc/usercenter/usercenter.go
@@ -0,0 +1,81 @@
+// Code generated by goctl. DO NOT EDIT.
+// goctl 1.9.2
+// Source: users.proto
+
+package usercenter
+
+import (
+ "context"
+
+ "juwan-backend/app/users/rpc/pb"
+
+ "github.com/zeromicro/go-zero/zrpc"
+ "google.golang.org/grpc"
+)
+
+type (
+ AddUsersReq = pb.AddUsersReq
+ AddUsersResp = pb.AddUsersResp
+ DelUsersReq = pb.DelUsersReq
+ DelUsersResp = pb.DelUsersResp
+ GetUserByUsernameReq = pb.GetUserByUsernameReq
+ GetUserByUsernameResp = pb.GetUserByUsernameResp
+ GetUsersByIdReq = pb.GetUsersByIdReq
+ GetUsersByIdResp = pb.GetUsersByIdResp
+ SearchUsersReq = pb.SearchUsersReq
+ SearchUsersResp = pb.SearchUsersResp
+ UpdateUsersReq = pb.UpdateUsersReq
+ UpdateUsersResp = pb.UpdateUsersResp
+ Users = pb.Users
+
+ Usercenter interface {
+ // -----------------------users-----------------------
+ AddUsers(ctx context.Context, in *AddUsersReq, opts ...grpc.CallOption) (*AddUsersResp, error)
+ UpdateUsers(ctx context.Context, in *UpdateUsersReq, opts ...grpc.CallOption) (*UpdateUsersResp, error)
+ DelUsers(ctx context.Context, in *DelUsersReq, opts ...grpc.CallOption) (*DelUsersResp, error)
+ GetUsersById(ctx context.Context, in *GetUsersByIdReq, opts ...grpc.CallOption) (*GetUsersByIdResp, error)
+ GetUserByUsername(ctx context.Context, in *GetUserByUsernameReq, opts ...grpc.CallOption) (*GetUserByUsernameResp, error)
+ SearchUsers(ctx context.Context, in *SearchUsersReq, opts ...grpc.CallOption) (*SearchUsersResp, error)
+ }
+
+ defaultUsercenter struct {
+ cli zrpc.Client
+ }
+)
+
+func NewUsercenter(cli zrpc.Client) Usercenter {
+ return &defaultUsercenter{
+ cli: cli,
+ }
+}
+
+// -----------------------users-----------------------
+func (m *defaultUsercenter) AddUsers(ctx context.Context, in *AddUsersReq, opts ...grpc.CallOption) (*AddUsersResp, error) {
+ client := pb.NewUsercenterClient(m.cli.Conn())
+ return client.AddUsers(ctx, in, opts...)
+}
+
+func (m *defaultUsercenter) UpdateUsers(ctx context.Context, in *UpdateUsersReq, opts ...grpc.CallOption) (*UpdateUsersResp, error) {
+ client := pb.NewUsercenterClient(m.cli.Conn())
+ return client.UpdateUsers(ctx, in, opts...)
+}
+
+func (m *defaultUsercenter) DelUsers(ctx context.Context, in *DelUsersReq, opts ...grpc.CallOption) (*DelUsersResp, error) {
+ client := pb.NewUsercenterClient(m.cli.Conn())
+ return client.DelUsers(ctx, in, opts...)
+}
+
+func (m *defaultUsercenter) GetUsersById(ctx context.Context, in *GetUsersByIdReq, opts ...grpc.CallOption) (*GetUsersByIdResp, error) {
+ client := pb.NewUsercenterClient(m.cli.Conn())
+ return client.GetUsersById(ctx, in, opts...)
+}
+
+func (m *defaultUsercenter) GetUserByUsername(ctx context.Context, in *GetUserByUsernameReq, opts ...grpc.CallOption) (*GetUserByUsernameResp, error) {
+ client := pb.NewUsercenterClient(m.cli.Conn())
+ return client.GetUserByUsername(ctx, in, opts...)
+}
+
+func (m *defaultUsercenter) SearchUsers(ctx context.Context, in *SearchUsersReq, opts ...grpc.CallOption) (*SearchUsersResp, error) {
+ client := pb.NewUsercenterClient(m.cli.Conn())
+ return client.SearchUsers(ctx, in, opts...)
+}
diff --git a/backup/user-rpc.yaml.backup b/backup/user-rpc.yaml.backup
new file mode 100644
index 0000000..13676e3
--- /dev/null
+++ b/backup/user-rpc.yaml.backup
@@ -0,0 +1,233 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: user-rpc
+ namespace: juwan
+ labels:
+ app: user-rpc
+spec:
+ replicas: 3
+ revisionHistoryLimit: 5
+ selector:
+ matchLabels:
+ app: user-rpc
+ template:
+ metadata:
+ labels:
+ app: user-rpc
+ spec:
+ serviceAccountName: find-endpoints
+ initContainers: # 等待数据库就绪的 Init Container 不影响资源使用但是影响调度策略(也可以忽略不计)
+ - name: wait-for-db
+ image: busybox:1.36
+ command:
+ [
+ "sh",
+ "-c",
+ 'until nc -z -v -w5 user-db-rw 5432; do echo "Waiting for database..."; sleep 2; done;',
+ ]
+ containers:
+ - name: user-rpc
+ image: user-rpc:v1
+ ports:
+ - containerPort: 9001
+ - containerPort: 4001
+ env:
+ - name: DB_URI
+ valueFrom:
+ secretKeyRef:
+ name: user-db-app
+ key: uri
+ - name: REDIS_HOST
+ value: "user-redis-sentinel-sentinel.juwan:26379"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: user-redis
+ key: password
+ readinessProbe:
+ tcpSocket:
+ port: 9001
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ livenessProbe:
+ tcpSocket:
+ port: 9001
+ initialDelaySeconds: 15
+ periodSeconds: 20
+ resources:
+ requests:
+ cpu: 500m
+ memory: 512Mi
+ limits:
+ cpu: 1000m
+ memory: 1024Mi
+ volumeMounts:
+ - name: timezone
+ mountPath: /etc/localtime
+ volumes:
+ - name: timezone
+ hostPath:
+ path: /usr/share/zoneinfo/Asia/Shanghai
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: user-rpc-svc
+ namespace: juwan
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "4001"
+ prometheus.io/path: "/metrics"
+spec:
+ ports:
+ - name: rpc
+ port: 9001
+ targetPort: 9001
+ - name: metrics
+ port: 4001
+ targetPort: 4001
+ selector:
+ app: user-rpc
+
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ name: user-rpc-hpa-c
+ namespace: juwan
+ labels:
+ app: user-rpc-hpa-c
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: user-rpc
+ minReplicas: 3
+ maxReplicas: 10
+ metrics:
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ averageUtilization: 80
+
+---
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+ name: user-rpc-hpa-m
+ namespace: juwan
+ labels:
+ app: user-rpc-hpa-m
+spec:
+ scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: user-rpc
+ minReplicas: 3
+ maxReplicas: 10
+ metrics:
+ - type: Resource
+ resource:
+ name: memory
+ target:
+ type: Utilization
+ averageUtilization: 80
+---
+# Redis 主从复制
+apiVersion: redis.redis.opstreelabs.in/v1beta2
+kind: RedisReplication
+metadata:
+ name: user-redis
+ namespace: juwan
+spec:
+ clusterSize: 3
+ kubernetesConfig:
+ image: quay.io/opstree/redis:v7.0.12
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ limits:
+ cpu: 500m
+ memory: 512Mi
+ redisSecret:
+ name: user-redis
+ key: password
+
+ redisExporter:
+ enabled: true
+ image: quay.io/opstree/redis-exporter:latest
+ imagePullPolicy: Always
+ podSecurityContext:
+ runAsUser: 1000
+ fsGroup: 1000
+ storage:
+ volumeClaimTemplate:
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 1Gi
+
+---
+# Sentinel 监控
+apiVersion: redis.redis.opstreelabs.in/v1beta2
+kind: RedisSentinel
+metadata:
+ name: user-redis-sentinel
+ namespace: juwan
+spec:
+ clusterSize: 3
+ kubernetesConfig:
+ image: quay.io/opstree/redis-sentinel:v7.0.12
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ limits:
+ cpu: 500m
+ memory: 512Mi
+ podSecurityContext:
+ runAsUser: 1000
+ fsGroup: 1000
+ redisSentinelConfig:
+ redisReplicationName: user-redis
+ masterGroupName: mymaster
+ redisPort: "6379"
+ quorum: "2"
+ downAfterMilliseconds: "5000"
+ failoverTimeout: "10000"
+ parallelSyncs: "1"
+
+---
+# PostgreSQL 集群
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ namespace: juwan
+ name: user-db
+spec:
+ instances: 3
+ backup:
+ barmanObjectStore:
+ destinationPath: s3://juwan-dev-pg-backups-zj/pg-data/
+ endpointURL: https://cn-nb1.rains3.com
+ s3Credentials:
+ accessKeyId:
+ name: rc-creds
+ key: SOucqRaJr4OyfcIu
+ secretAccessKey:
+ name: rc-creds
+ key: tn2Agj9EowMwuPA9y7TdSL0AXKsMEz
+ wal:
+ compression: gzip
+ storage:
+ size: 1Gi
+ monitoring:
+ enablePodMonitor: true
diff --git a/deploy/k8s/envoy/envoy.yaml b/deploy/k8s/envoy/envoy.yaml
new file mode 100644
index 0000000..2bb3314
--- /dev/null
+++ b/deploy/k8s/envoy/envoy.yaml
@@ -0,0 +1,157 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: envoy-config
+ namespace: juwan
+data:
+ envoy.yaml: |
+ static_resources:
+ listeners:
+ - name: http_listener
+ address:
+ socket_address:
+ address: 0.0.0.0
+ port_value: 8080
+ filter_chains:
+ - filters:
+ - name: envoy.filters.network.http_connection_manager
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+ stat_prefix: ingress_http
+ codec_type: AUTO
+ route_config:
+ name: local_route
+ virtual_hosts:
+ - name: gozero_services
+ domains: ["*"]
+ routes:
+ - match:
+ prefix: "/"
+ route:
+ cluster: user-api
+ http_filters:
+ - name: envoy.filters.http.lua
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua
+ inline_code: |
+ math.randomseed(os.time())
+
+ local function is_safe(method)
+ return method == "GET" or method == "HEAD" or method == "OPTIONS"
+ end
+
+ local function parse_cookie(cookie_header)
+ if not cookie_header then
+ return nil
+ end
+ for cookie in string.gmatch(cookie_header, "([^;]+)") do
+ local k, v = cookie:match("^%s*([^=]+)=?(.*)$")
+ if k == "csrf_token" then
+ return v
+ end
+ end
+ return nil
+ end
+
+ local function random_token()
+ local t = {}
+ for i = 1, 32 do
+ t[i] = string.format("%x", math.random(0, 15))
+ end
+ return table.concat(t)
+ end
+
+ function envoy_on_request(request_handle)
+ local headers = request_handle:headers()
+ local method = headers:get(":method") or ""
+ local cookie = parse_cookie(headers:get("cookie"))
+ local csrf_header = headers:get("x-csrf-token")
+
+ if is_safe(method) then
+ if not cookie then
+ local token = random_token()
+ request_handle:streamInfo():dynamicMetadata():set("csrf", "token", token)
+ end
+ return
+ end
+
+ if not cookie or not csrf_header or cookie ~= csrf_header then
+ request_handle:respond({[":status"] = "403"}, "CSRF validation failed")
+ return
+ end
+ end
+
+ function envoy_on_response(response_handle)
+ local md = response_handle:streamInfo():dynamicMetadata():get("csrf") or {}
+ local token = md["token"]
+ if token then
+ response_handle:headers():add("set-cookie", "csrf_token=" .. token .. "; Path=/; SameSite=Strict")
+ end
+ end
+ - name: envoy.filters.http.router
+ typed_config:
+ "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
+ clusters:
+ - name: user-api
+ connect_timeout: 2s
+ type: STRICT_DNS
+ lb_policy: ROUND_ROBIN
+ load_assignment:
+ cluster_name: user-api
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ address:
+ socket_address:
+ address: user-api-svc.juwan.svc.cluster.local
+ port_value: 8888
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: envoy-gateway
+ namespace: juwan
+ labels:
+ app: envoy-gateway
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: envoy-gateway
+ template:
+ metadata:
+ labels:
+ app: envoy-gateway
+ spec:
+ containers:
+ - name: envoy
+ image: envoyproxy/envoy:v1.32.2
+ args:
+ - "-c"
+ - "/etc/envoy/envoy.yaml"
+ - "--log-level"
+ - "info"
+ ports:
+ - name: http
+ containerPort: 8080
+ volumeMounts:
+ - name: config
+ mountPath: /etc/envoy
+ volumes:
+ - name: config
+ configMap:
+ name: envoy-config
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: envoy-gateway
+ namespace: juwan
+spec:
+ type: ClusterIP
+ selector:
+ app: envoy-gateway
+ ports:
+ - name: http
+ port: 80
+ targetPort: 8080
diff --git a/deploy/k8s/monitoring/00-namespace.yaml b/deploy/k8s/monitoring/00-namespace.yaml
new file mode 100644
index 0000000..0090389
--- /dev/null
+++ b/deploy/k8s/monitoring/00-namespace.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: monitoring
diff --git a/deploy/k8s/monitoring/grafana.yaml b/deploy/k8s/monitoring/grafana.yaml
new file mode 100644
index 0000000..45df5ca
--- /dev/null
+++ b/deploy/k8s/monitoring/grafana.yaml
@@ -0,0 +1,82 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: grafana-admin
+ namespace: monitoring
+type: Opaque
+data:
+ admin-user: YWRtaW4=
+ admin-password: Y2hhbmdlLW1l
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: grafana-datasources
+ namespace: monitoring
+data:
+ datasources.yaml: |
+ apiVersion: 1
+ datasources:
+ - name: Prometheus
+ type: prometheus
+ access: proxy
+ url: http://prometheus:9090
+ isDefault: true
+ - name: Loki
+ type: loki
+ access: proxy
+ url: http://loki:3100
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: grafana
+ namespace: monitoring
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: grafana
+ template:
+ metadata:
+ labels:
+ app: grafana
+ spec:
+ containers:
+ - name: grafana
+ image: grafana/grafana:10.4.6
+ ports:
+ - name: http
+ containerPort: 3000
+ env:
+ - name: GF_SECURITY_ADMIN_USER
+ valueFrom:
+ secretKeyRef:
+ name: grafana-admin
+ key: admin-user
+ - name: GF_SECURITY_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: grafana-admin
+ key: admin-password
+ volumeMounts:
+ - name: datasources
+ mountPath: /etc/grafana/provisioning/datasources
+ volumes:
+ - name: datasources
+ configMap:
+ name: grafana-datasources
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: grafana
+ namespace: monitoring
+spec:
+ type: ClusterIP
+ ports:
+ - name: http
+ port: 3000
+ targetPort: http
+ selector:
+ app: grafana
diff --git a/deploy/k8s/monitoring/loki.yaml b/deploy/k8s/monitoring/loki.yaml
new file mode 100644
index 0000000..1fc0897
--- /dev/null
+++ b/deploy/k8s/monitoring/loki.yaml
@@ -0,0 +1,90 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: loki-config
+ namespace: monitoring
+data:
+ loki.yaml: |
+ auth_enabled: false
+
+ server:
+ http_listen_port: 3100
+
+ common:
+ path_prefix: /loki
+ storage:
+ filesystem:
+ chunks_directory: /loki/chunks
+ rules_directory: /loki/rules
+ replication_factor: 1
+ ring:
+ kvstore:
+ store: inmemory
+
+ schema_config:
+ configs:
+ - from: 2024-01-01
+ store: boltdb-shipper
+ object_store: filesystem
+ schema: v12
+ index:
+ prefix: index_
+ period: 24h
+
+ storage_config:
+ boltdb_shipper:
+ active_index_directory: /loki/index
+ cache_location: /loki/cache
+ shared_store: filesystem
+
+ ruler:
+ alertmanager_url: http://localhost:9093
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: loki
+ namespace: monitoring
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: loki
+ template:
+ metadata:
+ labels:
+ app: loki
+ spec:
+ containers:
+ - name: loki
+ image: grafana/loki:2.9.6
+ args:
+ - "-config.file=/etc/loki/loki.yaml"
+ ports:
+ - name: http
+ containerPort: 3100
+ volumeMounts:
+ - name: config
+ mountPath: /etc/loki
+ - name: data
+ mountPath: /loki
+ volumes:
+ - name: config
+ configMap:
+ name: loki-config
+ - name: data
+ emptyDir: {}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: loki
+ namespace: monitoring
+spec:
+ type: ClusterIP
+ ports:
+ - name: http
+ port: 3100
+ targetPort: http
+ selector:
+ app: loki
diff --git a/deploy/k8s/monitoring/prometheus.yaml b/deploy/k8s/monitoring/prometheus.yaml
new file mode 100644
index 0000000..54484ff
--- /dev/null
+++ b/deploy/k8s/monitoring/prometheus.yaml
@@ -0,0 +1,138 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: prometheus
+ namespace: monitoring
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: prometheus
+rules:
+ - apiGroups: [""]
+ resources:
+ - nodes
+ - nodes/metrics
+ - services
+ - endpoints
+ - pods
+ - namespaces
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["extensions", "apps"]
+ resources:
+ - deployments
+ verbs: ["get", "list", "watch"]
+ - nonResourceURLs: ["/metrics"]
+ verbs: ["get"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: prometheus
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: prometheus
+subjects:
+ - kind: ServiceAccount
+ name: prometheus
+ namespace: monitoring
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: prometheus-config
+ namespace: monitoring
+data:
+ prometheus.yml: |
+ global:
+ scrape_interval: 15s
+ evaluation_interval: 15s
+
+ scrape_configs:
+ - job_name: "prometheus"
+ static_configs:
+ - targets: ["localhost:9090"]
+
+ - job_name: "kubernetes-annotated-endpoints"
+ kubernetes_sd_configs:
+ - role: endpoints
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
+ action: keep
+ regex: "true"
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+ action: replace
+ target_label: __scheme__
+ regex: (https?)
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+ action: replace
+ target_label: __metrics_path__
+ regex: (.+)
+ - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+ action: replace
+ target_label: __address__
+              regex: ([^:]+)(?::\d+)?;(\d+)
+ replacement: $1:$2
+ - source_labels: [__meta_kubernetes_namespace]
+ action: replace
+ target_label: namespace
+ - source_labels: [__meta_kubernetes_service_name]
+ action: replace
+ target_label: service
+ - source_labels: [__meta_kubernetes_endpoint_port_name]
+ action: replace
+ target_label: port
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: prometheus
+ namespace: monitoring
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: prometheus
+ template:
+ metadata:
+ labels:
+ app: prometheus
+ spec:
+ serviceAccountName: prometheus
+ containers:
+ - name: prometheus
+ image: prom/prometheus:v2.53.0
+ args:
+ - "--config.file=/etc/prometheus/prometheus.yml"
+ - "--storage.tsdb.path=/prometheus"
+ - "--storage.tsdb.retention.time=15d"
+ - "--web.enable-lifecycle"
+ ports:
+ - name: http
+ containerPort: 9090
+ volumeMounts:
+ - name: config
+ mountPath: /etc/prometheus
+ - name: data
+ mountPath: /prometheus
+ volumes:
+ - name: config
+ configMap:
+ name: prometheus-config
+ - name: data
+ emptyDir: {}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: prometheus
+ namespace: monitoring
+spec:
+ type: ClusterIP
+ ports:
+ - name: http
+ port: 9090
+ targetPort: http
+ selector:
+ app: prometheus
diff --git a/deploy/k8s/monitoring/promtail.yaml b/deploy/k8s/monitoring/promtail.yaml
new file mode 100644
index 0000000..c0f5ac4
--- /dev/null
+++ b/deploy/k8s/monitoring/promtail.yaml
@@ -0,0 +1,108 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: promtail
+ namespace: monitoring
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: promtail
+rules:
+ - apiGroups: [""]
+ resources:
+ - pods
+ - namespaces
+ verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: promtail
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: promtail
+subjects:
+ - kind: ServiceAccount
+ name: promtail
+ namespace: monitoring
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: promtail-config
+ namespace: monitoring
+data:
+ promtail.yaml: |
+ server:
+ http_listen_port: 9080
+ grpc_listen_port: 0
+
+ positions:
+ filename: /run/promtail/positions.yaml
+
+ clients:
+ - url: http://loki:3100/loki/api/v1/push
+
+ scrape_configs:
+ - job_name: kubernetes-pods
+ kubernetes_sd_configs:
+ - role: pod
+ relabel_configs:
+ - action: replace
+ source_labels: [__meta_kubernetes_pod_node_name]
+ target_label: node
+ - action: replace
+ source_labels: [__meta_kubernetes_namespace]
+ target_label: namespace
+ - action: replace
+ source_labels: [__meta_kubernetes_pod_name]
+ target_label: pod
+ - action: replace
+ source_labels: [__meta_kubernetes_pod_container_name]
+ target_label: container
+ - action: replace
+ source_labels: [__meta_kubernetes_pod_uid]
+ target_label: __path__
+ replacement: /var/log/pods/*$1/*/*.log
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: promtail
+ namespace: monitoring
+spec:
+ selector:
+ matchLabels:
+ app: promtail
+ template:
+ metadata:
+ labels:
+ app: promtail
+ spec:
+ serviceAccountName: promtail
+ tolerations:
+ - operator: "Exists"
+ containers:
+ - name: promtail
+ image: grafana/promtail:2.9.6
+ args:
+ - "-config.file=/etc/promtail/promtail.yaml"
+ volumeMounts:
+ - name: config
+ mountPath: /etc/promtail
+ - name: positions
+ mountPath: /run/promtail
+ - name: varlog
+ mountPath: /var/log
+ readOnly: true
+ volumes:
+ - name: config
+ configMap:
+ name: promtail-config
+ - name: positions
+ emptyDir: {}
+ - name: varlog
+ hostPath:
+ path: /var/log
diff --git a/deploy/k8s/base/postgreSql.yaml b/deploy/k8s/postgreSql.yaml
similarity index 100%
rename from deploy/k8s/base/postgreSql.yaml
rename to deploy/k8s/postgreSql.yaml
diff --git a/deploy/k8s/user-api.yaml b/deploy/k8s/service/user/user-api.yaml
similarity index 100%
rename from deploy/k8s/user-api.yaml
rename to deploy/k8s/service/user/user-api.yaml
diff --git a/deploy/k8s/user-rpc.yaml b/deploy/k8s/service/user/user-rpc.yaml
similarity index 70%
rename from deploy/k8s/user-rpc.yaml
rename to deploy/k8s/service/user/user-rpc.yaml
index fb47d5a..7a38133 100644
--- a/deploy/k8s/user-rpc.yaml
+++ b/deploy/k8s/service/user/user-rpc.yaml
@@ -17,7 +17,7 @@ spec:
app: user-rpc
spec:
serviceAccountName: find-endpoints
- initContainers:
+ initContainers: # 等待数据库就绪的 Init Container 不影响资源使用但是影响调度策略(也可以忽略不计)
- name: wait-for-db
image: busybox:1.36
command:
@@ -31,12 +31,20 @@ spec:
image: user-rpc:v1
ports:
- containerPort: 9001
+ - containerPort: 4001
env:
- name: DB_URI
valueFrom:
secretKeyRef:
name: user-db-app
key: uri
+ - name: REDIS_HOST
+ value: "user-redis.juwan:6379"
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: user-redis
+ key: password
readinessProbe:
tcpSocket:
port: 9001
@@ -68,10 +76,18 @@ kind: Service
metadata:
name: user-rpc-svc
namespace: juwan
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "4001"
+ prometheus.io/path: "/metrics"
spec:
ports:
- - port: 9001
+ - name: rpc
+ port: 9001
targetPort: 9001
+ - name: metrics
+ port: 4001
+ targetPort: 4001
selector:
app: user-rpc
@@ -120,8 +136,40 @@ spec:
target:
type: Utilization
averageUtilization: 80
+---
+# Redis Cluster
+apiVersion: redis.redis.opstreelabs.in/v1beta2
+kind: RedisCluster
+metadata:
+ name: user-redis
+ namespace: juwan
+spec:
+ clusterSize: 3
+ kubernetesConfig:
+ image: quay.io/opstree/redis:v7.0.12
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ limits:
+ cpu: 500m
+ memory: 512Mi
+ redisSecret:
+ name: user-redis
+ key: password
+ redisExporter:
+ enabled: true
+ image: quay.io/opstree/redis-exporter:latest
+ imagePullPolicy: Always
+ podSecurityContext:
+ runAsUser: 1000
+ fsGroup: 1000
+ storage:
+ size: 1Gi
---
+# PostgreSQL 集群
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
@@ -129,11 +177,6 @@ metadata:
name: user-db
spec:
instances: 3
- postInitSQLRefs:
- configMapRefs:
- - name: db-dx-init-script
- key: init-extensions-sql
- optional: false
backup:
barmanObjectStore:
destinationPath: s3://juwan-dev-pg-backups-zj/pg-data/
diff --git a/deploy/script/init-secrets.sh b/deploy/script/init-secrets.sh
new file mode 100644
index 0000000..6861799
--- /dev/null
+++ b/deploy/script/init-secrets.sh
@@ -0,0 +1,3 @@
+kubectl create secret generic user-redis \
+--from-literal=password=$(openssl rand -base64 12) \
+--namespace juwan
\ No newline at end of file
diff --git a/desc/api/user.api b/desc/api/user.api
deleted file mode 100644
index 91de8b7..0000000
--- a/desc/api/user.api
+++ /dev/null
@@ -1,26 +0,0 @@
-syntax = "v1"
-
-info (
- author: "Asadz"
- date: "2024-06-19"
- version: "1.0"
-)
-
-type (
- UserInfoReq {
- UserId int64 `json:"userId"`
- }
- UserInfoResp {
- UserId int64 `json:"userId"`
- Nickname string `json:"nickname"`
- }
-)
-
-service user-api {
- @doc (
- summary: "Get user infomaction by user id"
- )
- @handler userInfo
- post /user/info (UserInfoReq) returns (UserInfoResp)
-}
-
diff --git a/desc/api/users.api b/desc/api/users.api
new file mode 100644
index 0000000..8ad656d
--- /dev/null
+++ b/desc/api/users.api
@@ -0,0 +1,124 @@
+syntax = "v1"
+
+info (
+ author: "Asadz"
+ date: "2024-06-19"
+ version: "1.0"
+)
+
+type (
+ RegisterReq {
+ Username string `json:"username" binding:"required,min=3,max=50"`
+ Password string `json:"password" binding:"required,min=6,max=128"`
+ Email string `json:"email,omitempty" binding:"omitempty,email"`
+ Phone string `json:"phone,omitempty" binding:"omitempty,len=11"`
+ }
+ RegisterResp {
+ UserId int64 `json:"userId"`
+ Username string `json:"username"`
+ Email string `json:"email"`
+ Message string `json:"message"`
+ }
+ LoginReq {
+ Username string `json:"username" binding:"required"`
+ Password string `json:"password" binding:"required"`
+ }
+ LoginResp {
+ UserId int64 `json:"userId"`
+ Username string `json:"username"`
+ Email string `json:"email"`
+ Token string `json:"token"`
+ Expires int64 `json:"expires"`
+ }
+ GetUserInfoReq {
+ UserId int64 `path:"userId" binding:"required,gt=0"`
+ }
+ UserInfo {
+ UserId int64 `json:"userId"`
+ Username string `json:"username"`
+ Email string `json:"email"`
+ Phone string `json:"phone"`
+ Avatar string `json:"avatar"`
+ Status int `json:"status"`
+ CreateAt int64 `json:"createAt"`
+ UpdateAt int64 `json:"updateAt"`
+ }
+ UpdateUserInfoReq {
+ UserId int64 `path:"userId" binding:"required,gt=0"`
+ Email string `json:"email" binding:"omitempty,email"`
+ Phone string `json:"phone" binding:"omitempty,len=11"`
+ Avatar string `json:"avatar" binding:"omitempty,url"`
+ }
+ UpdateUserInfoResp {
+ UserId int64 `json:"userId"`
+ Message string `json:"message"`
+ }
+ UpdatePasswordReq {
+ UserId int64 `path:"userId" binding:"required,gt=0"`
+ OldPassword string `json:"oldPassword" binding:"required"`
+ NewPassword string `json:"newPassword" binding:"required,min=6,max=128"`
+ }
+ UpdatePasswordResp {
+ Message string `json:"message"`
+ }
+ LogoutReq {
+ UserId int64 `path:"userId" binding:"required,gt=0"`
+ }
+ LogoutResp {
+ Message string `json:"message"`
+ }
+ ErrorResp {
+ Code int `json:"code"`
+ Message string `json:"message"`
+ }
+)
+
+@server (
+ group: user
+ prefix: /api/users
+ middleware: Logger
+)
+service user-api {
+ @doc (
+ summary: "用户注册接口"
+ description: "通过用户名、密码、邮箱、电话号码注册新用户账户"
+ )
+ @handler Register
+ post /register (RegisterReq) returns (RegisterResp)
+
+ @doc (
+ summary: "用户登录接口"
+ description: "使用用户名和密码进行登录,返回访问令牌和用户信息"
+ )
+ @handler Login
+ post /login (LoginReq) returns (LoginResp)
+
+ @doc (
+ summary: "获取用户信息"
+ description: "根据用户ID获取用户的详细信息,包含个人资料和账户状态"
+ )
+ @handler GetUserInfo
+ get /:userId (GetUserInfoReq) returns (UserInfo)
+
+ @doc (
+ summary: "修改用户信息"
+ description: "修改用户的邮箱、电话号码、头像等信息"
+ )
+ @handler UpdateUserInfo
+ put /:userId (UpdateUserInfoReq) returns (UpdateUserInfoResp)
+
+ @doc (
+ summary: "修改用户密码"
+ description: "验证旧密码后修改为新密码,需要提供原密码"
+ )
+ @handler UpdatePassword
+ put /:userId/password (UpdatePasswordReq) returns (UpdatePasswordResp)
+
+ @doc (
+ summary: "用户登出"
+ description: "清除用户的登录会话,使用户令牌失效"
+ )
+ @handler Logout
+ post /:userId/logout (LogoutReq) returns (LogoutResp)
+}
+
diff --git a/desc/rpc/user.proto b/desc/rpc/user.proto
deleted file mode 100644
index a8973ac..0000000
--- a/desc/rpc/user.proto
+++ /dev/null
@@ -1,18 +0,0 @@
-syntax = "proto3";
-
-option go_package = "./pb";
-
-package pb;
-
-message GetUserInfoReq {
- int64 id = 1;
-}
-
-message GetUserInfoResp {
- int64 id = 1;
- string nickname = 2;
-}
-
-service Usercenter {
- rpc GetUserInfo(GetUserInfoReq) returns (GetUserInfoResp);
-}
\ No newline at end of file
diff --git a/desc/rpc/users.proto b/desc/rpc/users.proto
new file mode 100644
index 0000000..e30164e
--- /dev/null
+++ b/desc/rpc/users.proto
@@ -0,0 +1,116 @@
+syntax = "proto3";
+
+option go_package ="./pb";
+
+package pb;
+
+// ------------------------------------
+// Messages
+// ------------------------------------
+
+//--------------------------------users--------------------------------
+message Users {
+ string userId = 1; //userId
+ string username = 2; //username
+ string passwd = 3; //passwd
+ string nickname = 4; //nickname
+ string phone = 5; //phone
+ int64 roleType = 6; //roleType
+ bool isVerified = 7; //isVerified
+ bool state = 8; //state
+ int64 createdAt = 9; //createdAt
+ int64 updatedAt = 10; //updatedAt
+ int64 deletedAt = 11; //deletedAt
+}
+
+message AddUsersReq {
+ string userId = 1; //userId
+ string username = 2; //username
+ string passwd = 3; //passwd
+ string nickname = 4; //nickname
+ string phone = 5; //phone
+ int64 roleType = 6; //roleType
+ bool isVerified = 7; //isVerified
+ bool state = 8; //state
+ int64 createdAt = 9; //createdAt
+ int64 updatedAt = 10; //updatedAt
+ int64 deletedAt = 11; //deletedAt
+}
+
+message AddUsersResp {
+}
+
+message UpdateUsersReq {
+ string userId = 1; //userId
+ string username = 2; //username
+ string passwd = 3; //passwd
+ string nickname = 4; //nickname
+ string phone = 5; //phone
+ int64 roleType = 6; //roleType
+ bool isVerified = 7; //isVerified
+ bool state = 8; //state
+ int64 createdAt = 9; //createdAt
+ int64 updatedAt = 10; //updatedAt
+ int64 deletedAt = 11; //deletedAt
+}
+
+message UpdateUsersResp {
+}
+
+message DelUsersReq {
+ int64 id = 1; //id
+}
+
+message DelUsersResp {
+}
+
+message GetUsersByIdReq {
+ int64 id = 1; //id
+}
+
+message GetUsersByIdResp {
+ Users users = 1; //users
+}
+
+message SearchUsersReq {
+ int64 page = 1; //page
+ int64 limit = 2; //limit
+ string userId = 3; //userId
+ string username = 4; //username
+ string passwd = 5; //passwd
+ string nickname = 6; //nickname
+ string phone = 7; //phone
+ int64 roleType = 8; //roleType
+ bool isVerified = 9; //isVerified
+ bool state = 10; //state
+ int64 createdAt = 11; //createdAt
+ int64 updatedAt = 12; //updatedAt
+ int64 deletedAt = 13; //deletedAt
+}
+
+message SearchUsersResp {
+ repeated Users users = 1; //users
+}
+
+message GetUserByUsernameReq {
+ string username = 1; //username
+}
+
+message GetUserByUsernameResp {
+ Users users = 1; //users
+}
+
+// ------------------------------------
+// Rpc Func
+// ------------------------------------
+
+service usercenter{
+
+ //-----------------------users-----------------------
+ rpc AddUsers(AddUsersReq) returns (AddUsersResp);
+ rpc UpdateUsers(UpdateUsersReq) returns (UpdateUsersResp);
+ rpc DelUsers(DelUsersReq) returns (DelUsersResp);
+ rpc GetUsersById(GetUsersByIdReq) returns (GetUsersByIdResp);
+ rpc GetUserByUsername(GetUserByUsernameReq) returns (GetUserByUsernameResp);
+ rpc SearchUsers(SearchUsersReq) returns (SearchUsersResp);
+}
diff --git a/desc/sql/users.sql b/desc/sql/users.sql
index d7a4226..78f9b06 100644
--- a/desc/sql/users.sql
+++ b/desc/sql/users.sql
@@ -1,13 +1,18 @@
create extension if not exists "uuid-ossp";
create extension if not exists "pg_trgm";
-CREATE TABLE users (
- user_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
- username VARCHAR(50) UNIQUE NOT NULL,
- passwd VARCHAR(255) NOT NULL,
- nikename VARCHAR(50) NOT NULL,
- phone VARCHAR(20) UNIQUE NOT NULL,
- role_type SMALLINT NOT NULL, -- 1:玩家, 2:打手, 3:店长
- is_verified BOOLEAN DEFAULT false,
- created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
+
+create table users
+(
+ user_id uuid primary key default uuid_generate_v4(),
+ username VARCHAR(50) UNIQUE NOT NULL,
+ passwd VARCHAR(255) NOT NULL,
+ nickname VARCHAR(50) NOT NULL,
+ phone VARCHAR(20) UNIQUE NOT NULL,
+ role_type SMALLINT NOT NULL, -- 1:玩家, 2:打手, 3:店长
+ is_verified BOOLEAN DEFAULT false,
+ state BOOLEAN NOT NULL DEFAULT true,
+ created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
+ updated_at timestamp with time zone default current_timestamp,
+ deleted_at timestamp with time zone
);
\ No newline at end of file
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000..d5cd3d7
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,95 @@
+# Envoy Gateway Configuration
+
+This document explains how the Envoy gateway is configured and how to modify it.
+
+## Files
+
+- envoy.yaml: ConfigMap + Deployment + Service for Envoy
+
+## Current Behavior
+
+- Envoy listens on port 8080 in the Pod and exposes port 80 via a ClusterIP Service.
+- All HTTP traffic is routed to user-api only.
+- gRPC is not exposed by this gateway.
+
+## Routing
+
+In envoy.yaml, routes are defined under:
+
+static_resources -> listeners -> http_connection_manager -> route_config -> virtual_hosts
+
+The current routing rules are:
+
+- All requests (prefix: "/") -> cluster: user-api
+
+To add a new HTTP service, add a new route above the default route and define a new cluster.
+
+Example: route /order to order-api-svc:8899
+
+1) Add a route match:
+
+- match:
+ prefix: "/order"
+ route:
+ cluster: order-api
+
+2) Add a cluster:
+
+- name: order-api
+ connect_timeout: 2s
+ type: STRICT_DNS
+ lb_policy: ROUND_ROBIN
+ load_assignment:
+ cluster_name: order-api
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ address:
+ socket_address:
+ address: order-api-svc.juwan.svc.cluster.local
+ port_value: 8899
+
+## CSRF Protection
+
+Envoy uses a Lua filter for CSRF validation:
+
+- Safe methods (GET/HEAD/OPTIONS):
+ - If csrf_token cookie is missing, Envoy generates one and sets it in the response.
+- Unsafe methods (POST/PUT/PATCH/DELETE, etc):
+ - Requires BOTH:
+ - header: X-CSRF-Token
+ - cookie: csrf_token
+ - Values must match, otherwise Envoy returns 403.
+
+If you want a different cookie name or header name, update these in the Lua code:
+
+- Header: x-csrf-token
+- Cookie: csrf_token
+
+To relax or tighten rules, edit the functions:
+
+- is_safe(method)
+- envoy_on_request(request_handle)
+
+## Cookie Attributes
+
+Current Set-Cookie:
+
+csrf_token=<generated token>; Path=/; SameSite=Strict
+
+To add Secure or HttpOnly, update the string in envoy_on_response.
+
+## Deployment
+
+Apply or update:
+
+kubectl apply -f deploy/k8s/envoy/envoy.yaml
+
+## Common Changes
+
+- Change listening port:
+ - Update listener port_value and Service targetPort/port.
+- Change service namespace:
+ - Update cluster DNS addresses (e.g. service.ns.svc.cluster.local).
+- Add more services:
+ - Add route + add cluster, as shown above.
diff --git a/docs/deployment-troubleshooting.md b/docs/deployment-troubleshooting.md
new file mode 100644
index 0000000..555b5ef
--- /dev/null
+++ b/docs/deployment-troubleshooting.md
@@ -0,0 +1,385 @@
+# Kubernetes 部署问题排查与解决记录
+
+**日期**: 2026年2月23日
+**问题**: user-rpc 和 Redis 部署失败
+**状态**: 已诊断,解决中
+
+---
+
+## 📋 问题描述
+
+执行 `kubectl apply -f test.yaml` 后,资源虽然创建成功,但实际的应用 pods 并未正常运行:
+
+```
+kubectl apply -f ..\test.yaml
+✓ deployment.apps/user-rpc created
+✓ service/user-rpc-svc created
+✓ horizontalpodautoscaler.autoscaling/user-rpc-hpa-c created
+✓ horizontalpodautoscaler.autoscaling/user-rpc-hpa-m created
+✓ redisreplication.redis.redis.opstreelabs.in/user-redis created
+✓ redissentinel.redis.redis.opstreelabs.in/user-redis-sentinel created
+✓ cluster.postgresql.cnpg.io/user-db created
+```
+
+但执行 `kubectl get all` 后,发现:
+- ❌ **user-rpc pods 未创建**(Deployment 0/3 replicas ready)
+- ❌ **Redis pods 未创建**(RedisReplication 资源存在但无 pods)
+- ✅ user-db pods 正常运行(3/3)
+
+---
+
+## 🔍 排查过程
+
+### 第一步:检查 Deployment 状态
+
+```bash
+kubectl describe deployment user-rpc
+```
+
+**发现**:
+```
+Conditions:
+ Type Status Reason
+ ---- ------ ------
+ Progressing True NewReplicaSetCreated
+ Available False MinimumReplicasUnavailable
+ ReplicaFailure True FailedCreate
+```
+
+### 第二步:检查 ReplicaSet 详情
+
+```bash
+kubectl describe replicaset user-rpc-6bf77fbcd9
+```
+
+**发现关键错误**:
+```
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Warning FailedCreate 3m53s replicaset-controller Error creating:
+ pods "user-rpc-6bf77fbcd9-" is forbidden: error looking up service
+ account default/find-endpoints: serviceaccount "find-endpoints" not found
+```
+
+**问题 #1 诊断完成**:❌ **缺失 ServiceAccount "find-endpoints"**
+
+### 第三步:检查现有 ServiceAccounts
+
+```bash
+kubectl get serviceaccount
+```
+
+**结果**:
+```
+NAME AGE
+cluster-example 4d10h
+default 13d
+redis-operator 9h
+user-db 4m9s
+```
+
+确认 `find-endpoints` 不存在。
+
+### 第四步:检查 Secrets
+
+```bash
+kubectl get secrets
+```
+
+**结果**:默认 secrets 都存在,包括:
+- ✅ user-db-app
+- ✅ user-redis
+- ✅ user-db-ca, user-db-replication, user-db-server
+
+### 第五步:检查 Redis 部署
+
+```bash
+kubectl get redisreplication
+kubectl get pods | grep redis
+```
+
+**发现**:
+- ✅ RedisReplication 资源存在
+- ❌ Redis pods **完全没有被创建**
+
+**问题 #2 诊断**:❌ **Redis Operator 未响应 RedisReplication 资源**
+
+---
+
+## 🔧 第一次修复尝试
+
+### 创建缺失的 ServiceAccount
+
+```bash
+kubectl create serviceaccount find-endpoints
+```
+
+**结果**:✅ ServiceAccount 创建成功
+
+### 重启 Deployment
+
+```bash
+kubectl rollout restart deployment user-rpc
+```
+
+**等待 5-10 秒后重新检查**:
+
+```bash
+kubectl get pods -o wide
+```
+
+**新的发现**:
+
+```
+NAME READY STATUS RESTARTS AGE
+user-rpc-66f97fbdcc-ws7rc 0/1 ErrImagePull 0 26s
+user-rpc-6bf77fbcd9-njm2z 0/1 ErrImagePull 0 29s
+user-rpc-6bf77fbcd9-nwjtw 0/1 ImagePullBackOff 0 29s
+user-rpc-6bf77fbcd9-wjrf8 0/1 ErrImagePull 0 29s
+```
+
+✅ **好消息**:Pods 现在被创建了!(说明 ServiceAccount 问题已解决)
+❌ **新问题**:镜像拉取失败
+
+---
+
+## 🐛 根因分析
+
+### 问题 #1:缺失 ServiceAccount ✅ 已解决
+
+**根本原因**:test.yaml 的 Deployment manifest 指定了:
+```yaml
+spec:
+ template:
+ spec:
+ serviceAccountName: find-endpoints # 这个 ServiceAccount 不存在
+```
+
+但没有在 test.yaml 中创建 ServiceAccount 资源。
+
+**解决方案**:
+```bash
+kubectl create serviceaccount find-endpoints
+```
+
+或在 test.yaml 中添加:
+```yaml
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: find-endpoints
+ namespace: default
+```
+
+---
+
+### 问题 #2:镜像拉取失败 ❌ 需要修复
+
+```bash
+kubectl describe pod user-rpc-6bf77fbcd9-njm2z
+```
+
+**详细错误日志**:
+
+```
+Events:
+ Warning Failed 38s kubelet Failed to pull image
+ "103.236.53.208:4418/library/user-rpc@sha256:76b27d3eb4d5d44e...":
+ Error response from daemon: Get "https://103.236.53.208:4418/v2/":
+ context deadline exceeded (Client.Timeout exceeded while awaiting headers)
+
+ Warning Failed 23s kubelet Failed to pull image
+ "103.236.53.208:4418/library/user-rpc@sha256:76b27d3eb4d5d44e...":
+ http: server gave HTTP response to HTTPS client
+```
+
+**根本原因分析**:
+
+1. **网络连接失败**:`context deadline exceeded` - 无法连接到镜像仓库
+2. **协议不匹配**:`http: server gave HTTP response to HTTPS client` -
+ - 地址 `103.236.53.208:4418` 应该是 HTTP 而不是 HTTPS
+ - Docker daemon 尝试用 HTTPS 连接,但服务器使用 HTTP
+
+**可能原因**:
+- 镜像仓库地址错误或不可访问
+- 镜像仓库需要特定的网络配置
+- 仓库服务器离线或配置不当
+
+---
+
+### 问题 #3:Redis 部署失败 ❌ 需要诊断
+
+**现象**:
+- RedisReplication 和 RedisSentinel CRD 资源创建成功
+- 但没有对应的 Redis pods 被创建
+- `kubectl get pods | grep redis` 返回空
+
+**可能原因**:
+
+1. **Redis Operator 未正常工作**
+ - Operator pod 可能存在问题
+ - Operator 未能监听到新的 RedisReplication 资源
+
+2. **CRD 或 API 版本问题**
+ - manifest 中使用的 API 版本 `v1beta2` 可能不匹配 Operator 版本
+
+3. **资源限制或权限问题**
+ - Operator 无权限创建 pods
+ - 集群资源限制阻止了 pod 创建
+
+---
+
+## ✅ 已执行的修复
+
+| # | 问题 | 修复方法 | 状态 |
+|---|------|---------|------|
+| 1 | 缺失 ServiceAccount | `kubectl create serviceaccount find-endpoints` | ✅ 完成 |
+| 2 | 镜像拉取失败 | 需要更新镜像地址或修复网络 | ⏳ 待处理 |
+| 3 | Redis pods 未创建 | 需要诊断 Operator 日志 | ⏳ 待诊断 |
+
+---
+
+## 🚀 下一步解决方案
+
+### 优先级 1:修复 user-rpc 镜像拉取
+
+**选项 A:使用本地/内部镜像**
+```yaml
+# 修改 test.yaml 中的镜像地址
+image: localhost:5000/user-rpc:latest # 本地私有仓库
+# 或
+image: user-rpc:latest # 本地镜像(如果已通过 docker load 导入)
+```
+
+**选项 B:修复仓库地址**
+```yaml
+# 如果 103.236.53.208:4418 确实是正确仓库:镜像引用本身不能携带协议前缀
+# (http:// 开头的 image 是非法写法)。正确做法是在各节点容器运行时的
+# insecure-registries(Docker 的 /etc/docker/daemon.json)中登记该地址,
+# 让其按 HTTP 访问,然后使用不带协议的镜像地址:
+image: 103.236.53.208:4418/library/user-rpc:latest
+```
+
+**验证步骤**:
+```bash
+# 检查镜像仓库连接性
+curl -v http://103.236.53.208:4418/v2/
+```
+
+### 优先级 2:诊断 Redis Operator
+
+```bash
+# 查看 Operator 日志
+kubectl logs -l app.kubernetes.io/name=redis-operator -f
+
+# 查看 Operator pod
+kubectl get pods -A | grep redis-operator
+
+# 查看 RedisReplication 详细信息
+kubectl describe redisreplication user-redis
+
+# 检查 Operator 权限(RBAC)
+kubectl get role,rolebinding,clusterrole,clusterrolebinding | grep redis
+```
+
+### 优先级 3:增强 test.yaml
+
+建议在 test.yaml 中添加缺失的资源定义:
+
+```yaml
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: find-endpoints
+ namespace: default
+
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: registry-credentials
+ namespace: default
+type: kubernetes.io/dockercfg
+data:
+ .dockercfg: # 如果需要私有仓库认证
+```
+
+---
+
+## 📊 当前集群状态
+
+### Pods 状态总结
+
+| 应用 | 期望副本 | 实际运行 | 状态 |
+|------|---------|---------|------|
+| user-db | 3 | 3 | ✅ 正常 |
+| user-rpc | 3 | 0 | ❌ 镜像拉取失败 |
+| Redis | 3 | 0 | ❌ Operator 未创建 |
+| Sentinel | 3 | 0 | ❌ Operator 未创建 |
+
+### Services 状态
+
+```
+✅ kubernetes (内置)
+✅ user-rpc-svc:9001
+✅ user-db-r:5432 (只读副本)
+✅ user-db-ro:5432 (只读副本)
+✅ user-db-rw:5432 (读写主副本)
+```
+
+### HPA 配置
+
+```
+✅ user-rpc-hpa-c (CPU 目标: 80%) - 无法工作(pods 未运行)
+✅ user-rpc-hpa-m (Memory 目标: 80%) - 无法工作(pods 未运行)
+```
+
+---
+
+## 📝 关键命令速查表
+
+```bash
+# 查看 Deployment 状态
+kubectl describe deployment user-rpc
+
+# 查看 ReplicaSet 错误事件
+kubectl describe replicaset user-rpc-6bf77fbcd9
+
+# 查看 Pod 详细错误
+kubectl describe pod user-rpc-6bf77fbcd9-njm2z
+
+# 查看 Pod 日志
+kubectl logs user-rpc-6bf77fbcd9-njm2z
+
+# 查看所有事件(按时间排序)
+kubectl get events --sort-by='.lastTimestamp'
+
+# 查看特定命名空间的所有资源
+kubectl get all -n default
+
+# 重新启动 deployment(强制重新创建 pods)
+kubectl rollout restart deployment user-rpc
+
+# 查看 Operator 日志
+kubectl logs -l app.kubernetes.io/name=redis-operator
+
+# 检查 CRD 注册状态
+kubectl api-resources | grep redis
+```
+
+---
+
+## 🎯 总结
+
+| 问题 | 原因 | 解决状态 |
+|------|------|---------|
+| **ServiceAccount 缺失** | manifest 中声明但未创建 | ✅ **已解决** |
+| **镜像拉取失败** | 仓库地址不可达或协议不匹配 | ⏳ **待处理** |
+| **Redis 未部署** | Operator 未响应 CRD | ⏳ **待诊断** |
+
+**建议行动**:
+1. 确认/修复 user-rpc 镜像地址
+2. 诊断 Redis Operator 状态
+3. 验证所有依赖的 ServiceAccounts 和 Secrets 是否存在
+4. 考虑在 test.yaml 中添加完整的资源定义,避免手工创建
+
diff --git a/docs/gozero-redis-configuration.md b/docs/gozero-redis-configuration.md
new file mode 100644
index 0000000..ae4e062
--- /dev/null
+++ b/docs/gozero-redis-configuration.md
@@ -0,0 +1,1497 @@
+# Go-Zero 框架 Redis 配置完全指南
+
+**框架版本:** go-zero v1.5+
+**Redis 版本:** 7.0.12
+**部署环境:** Kubernetes (juwan namespace)
+**文档日期:** 2026年2月22日
+
+---
+
+## 📋 目录
+
+1. [配置概览](#配置概览)
+2. [单节点模式](#单节点模式)
+3. [Sentinel 哨兵模式](#sentinel-哨兵模式)
+4. [集群模式](#集群模式)
+5. [配置项详解](#配置项详解)
+6. [代码实现](#代码实现)
+7. [常用操作示例](#常用操作示例)
+8. [高级特性](#高级特性)
+9. [性能优化](#性能优化)
+10. [故障排查](#故障排查)
+11. [最佳实践](#最佳实践)
+
+---
+
+## 🎯 配置概览
+
+### Go-Zero Redis 支持的模式
+
+| 模式 | Type 值 | 用途 | 高可用 | 推荐度 |
+|-----|---------|------|--------|--------|
+| **单节点** | `node` | 开发/测试 | ❌ | ⭐⭐ |
+| **Sentinel** | `sentinel` | 生产环境 | ✅ | ⭐⭐⭐⭐⭐ |
+| **集群** | `cluster` | 大规模分片 | ✅ | ⭐⭐⭐⭐ |
+
+### 配置文件位置
+
+```
+app/users/rpc/
+├── etc/
+│ └── pb.yaml ← Redis 配置写在这里
+├── internal/
+│ ├── config/
+│ │ └── config.go ← 定义配置结构
+│ └── svc/
+│ └── serviceContext.go ← 初始化 Redis 客户端
+└── usercenter.go        ← 主程序入口
+```
+
+---
+
+## 🔵 单节点模式
+
+### 适用场景
+
+- ✅ 开发环境
+- ✅ 测试环境
+- ✅ POC 演示
+- ❌ 生产环境(无高可用)
+
+### 配置文件
+
+**`app/users/rpc/etc/pb.yaml`**
+```yaml
+Name: user.rpc
+ListenOn: 0.0.0.0:9001
+
+# Redis 单节点配置
+Redis:
+ Host: user-redis-master.juwan.svc.cluster.local:6379
+ Type: node
+ Pass: ${REDIS_PASSWORD} # 从环境变量读取
+ # Db: 0 # 可选,默认 0
+ # MaxIdle: 8 # 可选,连接池最大闲置连接数
+ # MaxActive: 0 # 可选,连接池最大活跃连接数,0 表示无限制
+
+Etcd:
+ Hosts:
+ - etcd-service.juwan.svc.cluster.local:2379
+ Key: user.rpc
+```
+
+### Config 结构
+
+**`app/users/rpc/internal/config/config.go`**
+```go
+package config
+
+import (
+ "github.com/zeromicro/go-zero/core/stores/redis"
+ "github.com/zeromicro/go-zero/zrpc"
+)
+
+type Config struct {
+ zrpc.RpcServerConf
+ Redis redis.RedisConf // Redis 配置
+}
+```
+
+### ServiceContext 初始化
+
+**`app/users/rpc/internal/svc/serviceContext.go`**
+```go
+package svc
+
+import (
+ "github.com/zeromicro/go-zero/core/stores/redis"
+ "juwan-backend/app/users/rpc/internal/config"
+)
+
+type ServiceContext struct {
+ Config config.Config
+ Redis *redis.Redis
+}
+
+func NewServiceContext(c config.Config) *ServiceContext {
+ return &ServiceContext{
+ Config: c,
+ Redis: redis.MustNewRedis(c.Redis), // 初始化 Redis
+ }
+}
+```
+
+### 使用示例
+
+**`app/users/rpc/internal/logic/getUsersByIdLogic.go`**
+```go
+package logic
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "juwan-backend/app/users/rpc/internal/svc"
+ "juwan-backend/app/users/rpc/pb"
+
+ "github.com/zeromicro/go-zero/core/logx"
+)
+
+type GetUsersByIdLogic struct {
+ ctx context.Context
+ svcCtx *svc.ServiceContext
+ logx.Logger
+}
+
+func NewGetUsersByIdLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetUsersByIdLogic {
+ return &GetUsersByIdLogic{
+ ctx: ctx,
+ svcCtx: svcCtx,
+ Logger: logx.WithContext(ctx),
+ }
+}
+
+func (l *GetUsersByIdLogic) GetUsersById(in *pb.GetUsersByIdReq) (*pb.GetUsersByIdResp, error) {
+ // 缓存 key
+ cacheKey := fmt.Sprintf("user:%d", in.Id)
+
+ // 1. 尝试从缓存获取
+ cached, err := l.svcCtx.Redis.Get(cacheKey)
+ if err == nil && cached != "" {
+ // 缓存命中
+ var user pb.User
+ if err := json.Unmarshal([]byte(cached), &user); err == nil {
+ l.Logger.Infof("Cache hit for user:%d", in.Id)
+ return &pb.GetUsersByIdResp{User: &user}, nil
+ }
+ }
+
+ // 2. 缓存未命中,从数据库查询
+ l.Logger.Infof("Cache miss for user:%d, querying DB", in.Id)
+ user := l.fetchUserFromDB(in.Id)
+ if user == nil {
+ return nil, fmt.Errorf("user not found")
+ }
+
+ // 3. 写入缓存(1小时过期)
+ userJSON, _ := json.Marshal(user)
+ err = l.svcCtx.Redis.Setex(cacheKey, string(userJSON), 3600)
+ if err != nil {
+ l.Logger.Errorf("Failed to set cache: %v", err)
+ }
+
+ return &pb.GetUsersByIdResp{User: user}, nil
+}
+
+func (l *GetUsersByIdLogic) fetchUserFromDB(id int64) *pb.User {
+ // 实际从数据库查询
+ // ...
+ return &pb.User{Id: id, Name: "John Doe"}
+}
+```
+
+---
+
+## 🟡 Sentinel 哨兵模式
+
+### 适用场景
+
+- ✅✅✅ **生产环境强烈推荐**
+- ✅ 自动故障转移
+- ✅ 高可用架构
+- ✅ 主从自动切换
+
+### 配置文件
+
+**`app/users/rpc/etc/pb.yaml`**
+```yaml
+Name: user.rpc
+ListenOn: 0.0.0.0:9001
+
+# Redis Sentinel 配置(推荐生产环境使用)
+# 注意:go-zero 的 redis.RedisConf 是一个结构体(YAML 映射),
+# 不能写成 "- Host:" 的列表形式,否则配置无法解析
+Redis:
+  Host: user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+  Type: sentinel
+  Pass: ${REDIS_PASSWORD}
+
+# 或者使用完整配置
+# Redis:
+# Host: user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+# Type: sentinel
+# Pass: ${REDIS_PASSWORD}
+# # Sentinel 特有配置
+# MasterName: mymaster # Sentinel 主节点名称,默认 mymaster
+
+Etcd:
+ Hosts:
+ - etcd-service.juwan.svc.cluster.local:2379
+ Key: user.rpc
+```
+
+### Config 结构(同单节点)
+
+**`app/users/rpc/internal/config/config.go`**
+```go
+package config
+
+import (
+ "github.com/zeromicro/go-zero/core/stores/redis"
+ "github.com/zeromicro/go-zero/zrpc"
+)
+
+type Config struct {
+ zrpc.RpcServerConf
+ Redis redis.RedisConf // 支持所有模式
+}
+```
+
+### ServiceContext 初始化(同单节点)
+
+**`app/users/rpc/internal/svc/serviceContext.go`**
+```go
+package svc
+
+import (
+ "github.com/zeromicro/go-zero/core/stores/redis"
+ "juwan-backend/app/users/rpc/internal/config"
+)
+
+type ServiceContext struct {
+ Config config.Config
+ Redis *redis.Redis
+}
+
+func NewServiceContext(c config.Config) *ServiceContext {
+ // go-zero 会根据 Type 自动选择连接模式
+ return &ServiceContext{
+ Config: c,
+ Redis: redis.MustNewRedis(c.Redis),
+ }
+}
+```
+
+### Sentinel 配置详解
+
+**完整配置选项:**
+```yaml
+Redis:
+ Host: user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+ Type: sentinel
+ Pass: ${REDIS_PASSWORD}
+
+ # Sentinel 特有配置
+ MasterName: mymaster # Sentinel 监控的主节点名称
+
+ # 连接池配置(可选)
+ MaxIdle: 8 # 最大闲置连接数
+ MaxActive: 0 # 最大活跃连接数,0 表示无限制
+ IdleTimeout: 300 # 闲置连接超时时间(秒)
+
+ # 超时配置(可选)
+ ConnectTimeout: 5000 # 连接超时(毫秒)
+ ReadTimeout: 3000 # 读超时(毫秒)
+ WriteTimeout: 3000 # 写超时(毫秒)
+```
+
+### 优势说明
+
+```go
+// Sentinel 模式的自动故障处理流程:
+
+// 1. 应用连接到 Sentinel
+app → Sentinel Service (26379)
+
+// 2. Sentinel 返回当前主节点地址
+Sentinel → app: "主节点在 10.244.1.10:6379"
+
+// 3. 应用连接到主节点进行读写
+app → Redis Master (10.244.1.10:6379)
+
+// 4. 主节点故障,Sentinel 检测到
+Redis Master (✗ 宕机)
+Sentinel → 检测 → 投票 → 提升新主节点
+
+// 5. 应用下次请求时自动连接到新主节点
+app → Sentinel → "新主节点在 10.244.2.20:6379"
+app → New Redis Master (10.244.2.20:6379)
+
+// 整个过程应用无需重启,自动完成切换!
+```
+
+---
+
+## 🔴 集群模式
+
+### 适用场景
+
+- ✅ 大规模数据(需要分片)
+- ✅ 超高并发
+- ✅ 数据量超过单机内存
+- ⚠️ 配置和运维复杂度高
+
+### 配置文件
+
+**`app/users/rpc/etc/pb.yaml`**
+```yaml
+Name: user.rpc
+ListenOn: 0.0.0.0:9001
+
+# Redis Cluster 配置
+# 注意:redis.RedisConf 的 Host 是单个字符串,填任意一个集群节点即可,
+# 客户端会自动发现其余节点;原先 "- Host:" 列表与 Type/Pass 映射键
+# 混写的形式不是合法 YAML,无法被解析
+Redis:
+  Host: redis-cluster-0.redis-cluster.juwan.svc.cluster.local:6379
+  Type: cluster
+  Pass: ${REDIS_PASSWORD}
+
+Etcd:
+ Hosts:
+ - etcd-service.juwan.svc.cluster.local:2379
+ Key: user.rpc
+```
+
+### Config 结构
+
+**`app/users/rpc/internal/config/config.go`**
+```go
+package config
+
+import (
+ "github.com/zeromicro/go-zero/core/stores/redis"
+ "github.com/zeromicro/go-zero/zrpc"
+)
+
+type Config struct {
+ zrpc.RpcServerConf
+ Redis redis.RedisConf
+}
+```
+
+### 集群模式特点
+
+**数据分片:**
+```
+应用请求
+ ↓
+根据 key 计算 hash slot (0-16383)
+ ↓
+路由到对应的分片节点
+ ↓
+┌─────────┬─────────┬─────────┐
+│ Shard 1 │ Shard 2 │ Shard 3 │
+│ 0-5460 │5461-10922│10923-16383│
+└─────────┴─────────┴─────────┘
+```
+
+**注意事项:**
+- ❌ 不支持多 key 操作(如 MGET, MSET)跨分片
+- ❌ 不支持事务(MULTI/EXEC)跨分片
+- ✅ 单 key 操作完全正常
+- ✅ 支持 hash tag 控制 key 分布
+
+---
+
+## 📚 配置项详解
+
+### redis.RedisConf 完整配置
+
+**结构定义:**
+```go
+type RedisConf struct {
+ Host string // Redis 地址
+ Type string // 类型: node, sentinel, cluster
+ Pass string // 密码
+ Db int // 数据库编号 (0-15),cluster 模式不支持
+
+ // Sentinel 模式专用
+ MasterName string // Sentinel 主节点名称
+
+ // 连接池配置
+ MaxIdle int // 最大闲置连接数
+ MaxActive int // 最大活跃连接数,0 表示无限制
+ IdleTimeout time.Duration // 闲置连接超时
+
+ // 超时配置
+ ConnectTimeout time.Duration // 连接超时
+ ReadTimeout time.Duration // 读超时
+ WriteTimeout time.Duration // 写超时
+
+ // TLS 配置(可选)
+ Tls bool // 是否启用 TLS
+}
+```
+
+### 各配置项说明
+
+#### 1. Host(必填)
+
+**单节点模式:**
+```yaml
+Redis:
+ Host: user-redis-master.juwan.svc.cluster.local:6379
+```
+
+**Sentinel 模式:**
+```yaml
+Redis:
+ Host: user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+ # 注意:这里填 Sentinel 地址(端口 26379),不是 Redis 地址
+```
+
+**集群模式:**
+```yaml
+Redis:
+  # Host 是单个字符串:填任意一个集群节点即可,客户端会自动发现其他节点
+  Host: redis-cluster-0:6379
+```
+
+#### 2. Type(必填)
+
+| 值 | 说明 |
+|----|------|
+| `node` | 单节点模式 |
+| `sentinel` | Sentinel 哨兵模式 |
+| `cluster` | 集群模式 |
+
+#### 3. Pass(强烈推荐)
+
+**从环境变量读取(推荐):**
+```yaml
+Redis:
+ Pass: ${REDIS_PASSWORD}
+```
+
+**硬编码(不推荐):**
+```yaml
+Redis:
+ Pass: "your-password" # ❌ 不安全
+```
+
+#### 4. Db(可选,默认 0)
+
+**适用模式:** 仅 `node` 和 `sentinel` 模式
+
+```yaml
+Redis:
+ Db: 0 # 数据库编号 0-15
+```
+
+**注意:**
+- ❌ Cluster 模式不支持多数据库
+- ✅ 单节点和 Sentinel 支持 0-15
+
+#### 5. MaxIdle(可选,默认 8)
+
+```yaml
+Redis:
+ MaxIdle: 8 # 连接池中最大闲置连接数
+```
+
+**建议值:**
+- 低并发:`8`
+- 中并发:`16`
+- 高并发:`32` 或 `CPU 核心数 * 2`
+
+#### 6. MaxActive(可选,默认 0)
+
+```yaml
+Redis:
+ MaxActive: 0 # 0 表示无限制
+ # MaxActive: 100 # 或设置一个上限
+```
+
+**建议值:**
+- 开发环境:`0`(无限制)
+- 生产环境:`100-500`(根据实际负载)
+
+#### 7. IdleTimeout(可选,默认 300 秒)
+
+```yaml
+Redis:
+ IdleTimeout: 300 # 秒
+```
+
+**说明:** 闲置连接超过此时间会被关闭
+
+#### 8. 超时配置(可选)
+
+```yaml
+Redis:
+ ConnectTimeout: 5000 # 连接超时 5 秒
+ ReadTimeout: 3000 # 读超时 3 秒
+ WriteTimeout: 3000 # 写超时 3 秒
+```
+
+---
+
+## 💻 代码实现
+
+### 完整项目结构
+
+```
+app/users/rpc/
+├── etc/
+│ ├── pb.yaml # 开发环境配置
+│ └── pb-prod.yaml # 生产环境配置
+├── internal/
+│ ├── config/
+│ │ └── config.go # 配置结构定义
+│ ├── svc/
+│ │ └── serviceContext.go # 服务上下文(初始化 Redis)
+│ ├── logic/
+│ │ ├── addUsersLogic.go
+│ │ ├── getUsersByIdLogic.go
+│ │ └── ...
+│ └── server/
+│ └── usercenterServer.go
+├── pb/
+│ ├── users.pb.go
+│ └── users_grpc.pb.go
+└── usercenter.go # 主程序入口
+```
+
+### 1. 配置文件示例
+
+**开发环境 `etc/pb.yaml`:**
+```yaml
+Name: user.rpc
+ListenOn: 0.0.0.0:9001
+
+# 开发环境使用单节点
+Redis:
+ Host: localhost:6379
+ Type: node
+ Pass: dev_password
+ Db: 0
+
+Etcd:
+ Hosts:
+ - localhost:2379
+ Key: user.rpc
+
+Log:
+ Level: info
+ Mode: console
+```
+
+**生产环境 `etc/pb-prod.yaml`:**
+```yaml
+Name: user.rpc
+ListenOn: 0.0.0.0:9001
+
+# 生产环境使用 Sentinel
+Redis:
+ Host: user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+ Type: sentinel
+ Pass: ${REDIS_PASSWORD}
+ MasterName: mymaster
+ MaxIdle: 16
+ MaxActive: 100
+ IdleTimeout: 300
+ ConnectTimeout: 5000
+ ReadTimeout: 3000
+ WriteTimeout: 3000
+
+Etcd:
+ Hosts:
+ - etcd-0.etcd.juwan.svc.cluster.local:2379
+ - etcd-1.etcd.juwan.svc.cluster.local:2379
+ - etcd-2.etcd.juwan.svc.cluster.local:2379
+ Key: user.rpc
+
+Log:
+ Level: error
+ Mode: file
+ Path: /var/log/user-rpc
+ KeepDays: 7
+```
+
+### 2. Config 定义
+
+**`internal/config/config.go`**
+```go
+package config
+
+import (
+ "github.com/zeromicro/go-zero/core/stores/redis"
+ "github.com/zeromicro/go-zero/zrpc"
+)
+
+type Config struct {
+ zrpc.RpcServerConf
+
+ // Redis 配置
+ Redis redis.RedisConf
+
+ // 其他配置...
+ // DB postgres.Config
+ // Kafka kafka.Config
+}
+```
+
+### 3. ServiceContext 初始化
+
+**`internal/svc/serviceContext.go`**
+```go
+package svc
+
+import (
+ "github.com/zeromicro/go-zero/core/stores/redis"
+ "juwan-backend/app/users/rpc/internal/config"
+)
+
+type ServiceContext struct {
+ Config config.Config
+ Redis *redis.Redis
+ // 其他依赖...
+ // DB *gorm.DB
+}
+
+func NewServiceContext(c config.Config) *ServiceContext {
+ // 初始化 Redis(支持所有模式:node, sentinel, cluster)
+ rdb := redis.MustNewRedis(c.Redis)
+
+ return &ServiceContext{
+ Config: c,
+ Redis: rdb,
+ }
+}
+```
+
+### 4. 主程序入口
+
+**`usercenter.go`**
+```go
+package main
+
+import (
+ "flag"
+ "fmt"
+
+ "juwan-backend/app/users/rpc/internal/config"
+ "juwan-backend/app/users/rpc/internal/server"
+ "juwan-backend/app/users/rpc/internal/svc"
+ "juwan-backend/app/users/rpc/pb"
+
+ "github.com/zeromicro/go-zero/core/conf"
+ "github.com/zeromicro/go-zero/core/service"
+ "github.com/zeromicro/go-zero/zrpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/reflection"
+)
+
+var configFile = flag.String("f", "etc/pb.yaml", "the config file")
+
+func main() {
+ flag.Parse()
+
+ // 加载配置
+ var c config.Config
+ conf.MustLoad(*configFile, &c)
+
+ // 初始化服务上下文
+ ctx := svc.NewServiceContext(c)
+
+ // 创建 gRPC 服务
+ s := zrpc.MustNewServer(c.RpcServerConf, func(grpcServer *grpc.Server) {
+ pb.RegisterUsercenterServer(grpcServer, server.NewUsercenterServer(ctx))
+
+ if c.Mode == service.DevMode || c.Mode == service.TestMode {
+ reflection.Register(grpcServer)
+ }
+ })
+ defer s.Stop()
+
+ fmt.Printf("Starting rpc server at %s...\n", c.ListenOn)
+ s.Start()
+}
+```
+
+---
+
+## 🔧 常用操作示例
+
+### 1. 基本读写操作
+
+```go
+package logic
+
+import (
+ "context"
+ "fmt"
+ "time"
+)
+
+type UserLogic struct {
+ ctx context.Context
+ svcCtx *svc.ServiceContext
+ logx.Logger
+}
+
+// Set 操作
+func (l *UserLogic) SetUser(userId int64, data string) error {
+ key := fmt.Sprintf("user:%d", userId)
+ return l.svcCtx.Redis.Set(key, data)
+}
+
+// Setex 操作(带过期时间)
+func (l *UserLogic) SetUserWithExpiry(userId int64, data string) error {
+ key := fmt.Sprintf("user:%d", userId)
+ // 缓存 1 小时
+ return l.svcCtx.Redis.Setex(key, data, 3600)
+}
+
+// Get 操作
+func (l *UserLogic) GetUser(userId int64) (string, error) {
+ key := fmt.Sprintf("user:%d", userId)
+ return l.svcCtx.Redis.Get(key)
+}
+
+// Del 操作
+func (l *UserLogic) DeleteUser(userId int64) error {
+ key := fmt.Sprintf("user:%d", userId)
+ _, err := l.svcCtx.Redis.Del(key)
+ return err
+}
+
+// Exists 检查
+func (l *UserLogic) UserExists(userId int64) (bool, error) {
+ key := fmt.Sprintf("user:%d", userId)
+ return l.svcCtx.Redis.Exists(key)
+}
+```
+
+### 2. Hash 操作
+
+```go
+// HSet 操作
+func (l *UserLogic) SetUserField(userId int64, field, value string) error {
+ key := fmt.Sprintf("user:%d", userId)
+ return l.svcCtx.Redis.Hset(key, field, value)
+}
+
+// HGet 操作
+func (l *UserLogic) GetUserField(userId int64, field string) (string, error) {
+ key := fmt.Sprintf("user:%d", userId)
+ return l.svcCtx.Redis.Hget(key, field)
+}
+
+// HGetAll 操作
+func (l *UserLogic) GetAllUserFields(userId int64) (map[string]string, error) {
+ key := fmt.Sprintf("user:%d", userId)
+ return l.svcCtx.Redis.Hgetall(key)
+}
+
+// HMSet 批量设置
+func (l *UserLogic) SetUserFields(userId int64, fields map[string]string) error {
+ key := fmt.Sprintf("user:%d", userId)
+ return l.svcCtx.Redis.Hmset(key, fields)
+}
+```
+
+### 3. List 操作
+
+```go
+// LPush 操作
+func (l *UserLogic) AddMessage(userId int64, message string) error {
+ key := fmt.Sprintf("messages:%d", userId)
+ _, err := l.svcCtx.Redis.Lpush(key, message)
+ return err
+}
+
+// LRange 操作
+func (l *UserLogic) GetMessages(userId int64, start, stop int) ([]string, error) {
+ key := fmt.Sprintf("messages:%d", userId)
+ return l.svcCtx.Redis.Lrange(key, start, stop)
+}
+
+// LLen 操作
+func (l *UserLogic) GetMessageCount(userId int64) (int, error) {
+ key := fmt.Sprintf("messages:%d", userId)
+ return l.svcCtx.Redis.Llen(key)
+}
+```
+
+### 4. Set 操作
+
+```go
+// SAdd 添加成员
+func (l *UserLogic) AddUserTag(userId int64, tag string) error {
+ key := fmt.Sprintf("user:tags:%d", userId)
+ _, err := l.svcCtx.Redis.Sadd(key, tag)
+ return err
+}
+
+// SMembers 获取所有成员
+func (l *UserLogic) GetUserTags(userId int64) ([]string, error) {
+ key := fmt.Sprintf("user:tags:%d", userId)
+ return l.svcCtx.Redis.Smembers(key)
+}
+
+// SIsMember 检查成员
+func (l *UserLogic) HasUserTag(userId int64, tag string) (bool, error) {
+ key := fmt.Sprintf("user:tags:%d", userId)
+ return l.svcCtx.Redis.Sismember(key, tag)
+}
+```
+
+### 5. Sorted Set 操作
+
+```go
+// ZAdd 添加成员
+func (l *UserLogic) AddToLeaderboard(userId int64, score int64) error {
+ key := "leaderboard"
+ _, err := l.svcCtx.Redis.Zadd(key, score, fmt.Sprintf("%d", userId))
+ return err
+}
+
+// ZRevRange 获取排行榜(从高到低)
+func (l *UserLogic) GetTopUsers(count int) ([]string, error) {
+ key := "leaderboard"
+ return l.svcCtx.Redis.Zrevrange(key, 0, int64(count-1))
+}
+
+// ZRank 获取排名
+func (l *UserLogic) GetUserRank(userId int64) (int64, error) {
+ key := "leaderboard"
+ return l.svcCtx.Redis.Zrank(key, fmt.Sprintf("%d", userId))
+}
+```
+
+### 6. 缓存模式实现
+
+**Cache-Aside Pattern(推荐):**
+```go
+func (l *UserLogic) GetUserById(userId int64) (*User, error) {
+ cacheKey := fmt.Sprintf("user:%d", userId)
+
+ // 1. 查缓存
+ cached, err := l.svcCtx.Redis.Get(cacheKey)
+ if err == nil && cached != "" {
+ var user User
+ if err := json.Unmarshal([]byte(cached), &user); err == nil {
+ return &user, nil
+ }
+ }
+
+ // 2. 查数据库
+ user, err := l.getUserFromDB(userId)
+ if err != nil {
+ return nil, err
+ }
+
+ // 3. 写缓存
+ userJSON, _ := json.Marshal(user)
+ l.svcCtx.Redis.Setex(cacheKey, string(userJSON), 3600)
+
+ return user, nil
+}
+
+func (l *UserLogic) UpdateUser(user *User) error {
+ // 1. 更新数据库
+ if err := l.updateUserInDB(user); err != nil {
+ return err
+ }
+
+ // 2. 删除缓存(下次读取时会重新加载)
+ cacheKey := fmt.Sprintf("user:%d", user.Id)
+ l.svcCtx.Redis.Del(cacheKey)
+
+ return nil
+}
+```
+
+### 7. 分布式锁
+
+```go
+// 获取分布式锁(注意:示例中 Setnx 未设置过期时间,expiry 参数未被使用,
+// 持锁进程崩溃会导致死锁;生产环境建议使用 SetnxEx 原子地设置 TTL)
+func (l *UserLogic) AcquireLock(key string, expiry int) (bool, error) {
+ lockKey := fmt.Sprintf("lock:%s", key)
+ return l.svcCtx.Redis.Setnx(lockKey, "1")
+}
+
+// 释放锁
+func (l *UserLogic) ReleaseLock(key string) error {
+ lockKey := fmt.Sprintf("lock:%s", key)
+ _, err := l.svcCtx.Redis.Del(lockKey)
+ return err
+}
+
+// 使用示例
+func (l *UserLogic) ProcessWithLock(userId int64) error {
+ lockKey := fmt.Sprintf("user:%d", userId)
+
+ // 获取锁
+ acquired, err := l.AcquireLock(lockKey, 10)
+ if err != nil {
+ return err
+ }
+ if !acquired {
+ return fmt.Errorf("failed to acquire lock")
+ }
+ defer l.ReleaseLock(lockKey)
+
+ // 执行业务逻辑
+ // ...
+
+ return nil
+}
+```
+
+### 8. Pipeline 批量操作
+
+```go
+// 使用 go-redis 原生客户端进行 Pipeline
+func (l *UserLogic) BatchSetUsers(users []*User) error {
+ // go-zero 的 Redis 包装了 go-redis,可以获取原生客户端
+ rdb := l.svcCtx.Redis
+
+ pipe := rdb.Pipelined(func(pip redis.Pipeliner) error {
+ for _, user := range users {
+ key := fmt.Sprintf("user:%d", user.Id)
+ userJSON, _ := json.Marshal(user)
+ pip.Set(context.Background(), key, userJSON, time.Hour)
+ }
+ return nil
+ })
+
+ return pipe
+}
+```
+
+---
+
+## 🚀 高级特性
+
+### 1. 缓存穿透防护(布隆过滤器)
+
+```go
+import "github.com/zeromicro/go-zero/core/bloom"
+
+type UserLogic struct {
+ ctx context.Context
+ svcCtx *svc.ServiceContext
+ filter *bloom.Filter
+}
+
+func (l *UserLogic) GetUserWithBloom(userId int64) (*User, error) {
+ // 1. 布隆过滤器检查
+ if !l.filter.Exists([]byte(fmt.Sprintf("%d", userId))) {
+ return nil, fmt.Errorf("user not found")
+ }
+
+ // 2. 查缓存
+ cacheKey := fmt.Sprintf("user:%d", userId)
+ cached, err := l.svcCtx.Redis.Get(cacheKey)
+ if err == nil && cached != "" {
+ var user User
+ json.Unmarshal([]byte(cached), &user)
+ return &user, nil
+ }
+
+ // 3. 查数据库
+ user, err := l.getUserFromDB(userId)
+ if err != nil {
+ return nil, err
+ }
+
+ // 4. 写缓存
+ userJSON, _ := json.Marshal(user)
+ l.svcCtx.Redis.Setex(cacheKey, string(userJSON), 3600)
+
+ return user, nil
+}
+```
+
+### 2. 缓存击穿防护(Singleflight)
+
+```go
+import "golang.org/x/sync/singleflight"
+
+type UserLogic struct {
+ ctx context.Context
+ svcCtx *svc.ServiceContext
+ sg singleflight.Group
+}
+
+func (l *UserLogic) GetUserWithSingleflight(userId int64) (*User, error) {
+ cacheKey := fmt.Sprintf("user:%d", userId)
+
+ // 使用 Singleflight 确保同一时刻只有一个请求查询
+ v, err, _ := l.sg.Do(cacheKey, func() (interface{}, error) {
+ // 1. 查缓存
+ cached, err := l.svcCtx.Redis.Get(cacheKey)
+ if err == nil && cached != "" {
+ var user User
+ json.Unmarshal([]byte(cached), &user)
+ return &user, nil
+ }
+
+ // 2. 查数据库
+ user, err := l.getUserFromDB(userId)
+ if err != nil {
+ return nil, err
+ }
+
+ // 3. 写缓存
+ userJSON, _ := json.Marshal(user)
+ l.svcCtx.Redis.Setex(cacheKey, string(userJSON), 3600)
+
+ return user, nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return v.(*User), nil
+}
+```
+
+### 3. 缓存雪崩防护(随机过期时间)
+
+```go
+import (
+ "math/rand"
+ "time"
+)
+
+func (l *UserLogic) SetCacheWithRandomExpiry(key string, value string, baseExpiry int) error {
+ // 在基础过期时间上增加随机偏移(约 ±10%),避免大量 key 同时过期
+ randomOffset := rand.Intn(baseExpiry / 5)
+ expiry := baseExpiry + randomOffset - (baseExpiry / 10)
+
+ return l.svcCtx.Redis.Setex(key, value, expiry)
+}
+
+// 使用示例
+func (l *UserLogic) CacheUser(user *User) error {
+ key := fmt.Sprintf("user:%d", user.Id)
+ userJSON, _ := json.Marshal(user)
+
+ // 基础过期时间 1 小时,实际会在 54-72 分钟之间随机
+ return l.SetCacheWithRandomExpiry(key, string(userJSON), 3600)
+}
+```
+
+---
+
+## ⚡ 性能优化
+
+### 1. 连接池配置优化
+
+**根据并发量调整:**
+```yaml
+# 低并发(< 100 QPS)
+Redis:
+ MaxIdle: 8
+ MaxActive: 100
+
+# 中并发(100-1000 QPS)
+Redis:
+ MaxIdle: 16
+ MaxActive: 500
+
+# 高并发(> 1000 QPS)
+Redis:
+ MaxIdle: 32
+ MaxActive: 1000
+```
+
+### 2. 超时配置优化
+
+```yaml
+Redis:
+ # 连接超时:通常设置较大值
+ ConnectTimeout: 5000 # 5 秒
+
+ # 读写超时:设置较小值,快速失败
+ ReadTimeout: 1000 # 1 秒
+ WriteTimeout: 1000 # 1 秒
+```
+
+### 3. Pipeline 批量操作
+
+**避免循环调用:**
+```go
+// ❌ 不好:循环调用
+for _, user := range users {
+ key := fmt.Sprintf("user:%d", user.Id)
+ l.svcCtx.Redis.Set(key, user.Name)
+}
+
+// ✅ 推荐:使用 Pipeline
+pipe := l.svcCtx.Redis.Pipelined(func(pip redis.Pipeliner) error {
+ for _, user := range users {
+ key := fmt.Sprintf("user:%d", user.Id)
+ pip.Set(context.Background(), key, user.Name, 0)
+ }
+ return nil
+})
+```
+
+### 4. 合理的缓存过期时间
+
+```go
+const (
+ CacheExpiryShort = 300 // 5 分钟 - 热点数据
+ CacheExpiryMedium = 3600 // 1 小时 - 常规数据
+ CacheExpiryLong = 86400 // 1 天 - 冷数据
+)
+
+func (l *UserLogic) SetUserCache(user *User, expiry int) error {
+ key := fmt.Sprintf("user:%d", user.Id)
+ userJSON, _ := json.Marshal(user)
+ return l.svcCtx.Redis.Setex(key, string(userJSON), expiry)
+}
+```
+
+### 5. Key 命名规范
+
+```go
+// 推荐的 Key 命名规范
+const (
+ KeyPrefixUser = "user:" // user:123
+ KeyPrefixSession = "session:" // session:abc123
+ KeyPrefixCache = "cache:" // cache:user:list
+ KeyPrefixLock = "lock:" // lock:order:456
+ KeyPrefixCounter = "counter:" // counter:page:views
+)
+
+// 使用函数生成 Key
+func UserCacheKey(userId int64) string {
+ return fmt.Sprintf("%s%d", KeyPrefixUser, userId)
+}
+
+func SessionKey(sessionId string) string {
+ return fmt.Sprintf("%s%s", KeyPrefixSession, sessionId)
+}
+```
+
+---
+
+## 🔍 故障排查
+
+### 1. 连接失败
+
+**问题:** `dial tcp xxx:6379: i/o timeout`
+
+**排查步骤:**
+```bash
+# 1. 检查 Redis 服务
+kubectl get pods -n juwan | grep redis
+
+# 2. 检查 Service
+kubectl get svc -n juwan | grep redis
+
+# 3. 测试网络连通性
+kubectl run -it --rm nettest --image=busybox --restart=Never -n juwan -- \
+ nc -zv user-redis-master 6379
+
+# 4. 查看应用日志
+kubectl logs -f user-rpc-xxx -n juwan
+```
+
+**解决方案:**
+- 确认 Host 配置正确
+- 确认网络策略允许访问
+- 检查 Redis Pod 状态
+
+### 2. 认证失败
+
+**问题:** `NOAUTH Authentication required`
+
+**排查:**
+```go
+// 打印配置(调试用)
+func main() {
+ var c config.Config
+ conf.MustLoad(*configFile, &c)
+
+ // 检查密码是否正确加载
+ fmt.Printf("Redis Config: Host=%s, Pass=%s\n", c.Redis.Host, c.Redis.Pass)
+}
+```
+
+**解决方案:**
+- 确认环境变量 `REDIS_PASSWORD` 已设置
+- 确认 Secret 正确挂载
+- 检查密码是否正确
+
+### 3. 性能问题
+
+**慢查询检测:**
+```go
+import "time"
+
+func (l *UserLogic) GetUserWithMetrics(userId int64) (*User, error) {
+ start := time.Now()
+ defer func() {
+ duration := time.Since(start)
+ if duration > 100*time.Millisecond {
+ l.Logger.Warnf("Slow Redis query: %v", duration)
+ }
+ }()
+
+ // 执行查询
+ key := fmt.Sprintf("user:%d", userId)
+ cached, err := l.svcCtx.Redis.Get(key)
+ // ...
+}
+```
+
+**常见原因:**
+- 连接池耗尽 → 增大 MaxActive
+- 大 Value 传输 → 拆分或压缩数据
+- 网络延迟 → 检查网络质量
+
+### 4. 内存泄漏
+
+**检查连接是否正确关闭:**
+```go
+// go-zero 的 Redis 客户端会自动管理连接
+// 但如果使用原生 go-redis 客户端,需要手动关闭
+
+// ❌ 错误示例
+func bad() {
+ rdb := redis.NewClient(&redis.Options{...})
+ // 使用完后没有关闭
+}
+
+// ✅ 正确示例
+func good() {
+ rdb := redis.NewClient(&redis.Options{...})
+ defer rdb.Close()
+ // ...
+}
+```
+
+---
+
+## 📖 最佳实践
+
+### 1. 环境变量管理
+
+**Kubernetes Deployment:**
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: user-rpc
+ namespace: juwan
+spec:
+ template:
+ spec:
+ containers:
+ - name: user-rpc
+ image: user-rpc:v1
+ env:
+ # 从 Secret 读取 Redis 密码
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: user-redis
+ key: password
+
+ # 从 ConfigMap 读取其他配置
+ - name: REDIS_HOST
+ valueFrom:
+ configMapKeyRef:
+ name: user-rpc-config
+ key: redis.host
+```
+
+### 2. 配置分离
+
+**开发、测试、生产环境分离:**
+```bash
+# 开发环境
+go run usercenter.go -f etc/pb-dev.yaml
+
+# 测试环境
+go run usercenter.go -f etc/pb-test.yaml
+
+# 生产环境
+./usercenter -f etc/pb-prod.yaml
+```
+
+### 3. 监控指标
+
+```go
+import (
+ "github.com/zeromicro/go-zero/core/metric"
+ "github.com/zeromicro/go-zero/core/prometheus"
+)
+
+var (
+ redisCacheHit = metric.NewCounterVec(&metric.CounterVecOpts{
+ Namespace: "user_rpc",
+ Subsystem: "redis",
+ Name: "cache_hit_total",
+ Help: "redis cache hit count",
+ Labels: []string{"key"},
+ })
+
+ redisCacheMiss = metric.NewCounterVec(&metric.CounterVecOpts{
+ Namespace: "user_rpc",
+ Subsystem: "redis",
+ Name: "cache_miss_total",
+ Help: "redis cache miss count",
+ Labels: []string{"key"},
+ })
+)
+
+func (l *UserLogic) GetUserWithMetrics(userId int64) (*User, error) {
+ cacheKey := fmt.Sprintf("user:%d", userId)
+
+ cached, err := l.svcCtx.Redis.Get(cacheKey)
+ if err == nil && cached != "" {
+ redisCacheHit.Inc("user")
+ var user User
+ json.Unmarshal([]byte(cached), &user)
+ return &user, nil
+ }
+
+ redisCacheMiss.Inc("user")
+ // 查询数据库...
+}
+```
+
+### 4. 错误处理
+
+```go
+func (l *UserLogic) GetUser(userId int64) (*User, error) {
+ cacheKey := fmt.Sprintf("user:%d", userId)
+
+ // 缓存查询失败不应该中断流程
+ cached, err := l.svcCtx.Redis.Get(cacheKey)
+ if err == nil && cached != "" {
+ var user User
+ if json.Unmarshal([]byte(cached), &user) == nil {
+ return &user, nil
+ }
+ }
+
+ // 缓存失败,降级查询数据库
+ user, err := l.getUserFromDB(userId)
+ if err != nil {
+ return nil, err
+ }
+
+ // 尝试回写缓存,失败不影响返回结果
+ go func() {
+ userJSON, _ := json.Marshal(user)
+ if err := l.svcCtx.Redis.Setex(cacheKey, string(userJSON), 3600); err != nil {
+ l.Logger.Errorf("Failed to set cache: %v", err)
+ }
+ }()
+
+ return user, nil
+}
+```
+
+### 5. 缓存更新策略
+
+**Write-Through(同步更新):**
+```go
+func (l *UserLogic) UpdateUser(user *User) error {
+ // 1. 更新数据库
+ if err := l.updateUserInDB(user); err != nil {
+ return err
+ }
+
+ // 2. 同步更新缓存
+ cacheKey := fmt.Sprintf("user:%d", user.Id)
+ userJSON, _ := json.Marshal(user)
+ if err := l.svcCtx.Redis.Setex(cacheKey, string(userJSON), 3600); err != nil {
+ l.Logger.Errorf("Failed to update cache: %v", err)
+ }
+
+ return nil
+}
+```
+
+**Write-Behind(异步更新):**
+```go
+func (l *UserLogic) UpdateUserAsync(user *User) error {
+ // 1. 立即更新缓存
+ cacheKey := fmt.Sprintf("user:%d", user.Id)
+ userJSON, _ := json.Marshal(user)
+ l.svcCtx.Redis.Setex(cacheKey, string(userJSON), 3600)
+
+ // 2. 异步更新数据库
+ go func() {
+ if err := l.updateUserInDB(user); err != nil {
+ l.Logger.Errorf("Failed to update DB: %v", err)
+ // 回滚缓存
+ l.svcCtx.Redis.Del(cacheKey)
+ }
+ }()
+
+ return nil
+}
+```
+
+---
+
+## 📚 参考资源
+
+### 官方文档
+- [go-zero 官方文档](https://go-zero.dev/)
+- [go-zero Redis 文档](https://go-zero.dev/docs/tutorials/redis)
+- [go-redis 文档](https://redis.uptrace.dev/)
+
+### 示例代码
+- [go-zero Examples](https://github.com/zeromicro/go-zero/tree/master/example)
+- [go-zero Book Store](https://github.com/zeromicro/go-zero-book-store)
+
+### 相关工具
+- [RedisInsight](https://redis.com/redis-enterprise/redis-insight/) - Redis 管理工具
+- [redis-cli](https://redis.io/docs/manual/cli/) - Redis 命令行工具
+
+---
+
+## 📝 总结
+
+### 快速开始检查清单
+
+- [ ] 1. 在 `config.go` 中定义 `Redis redis.RedisConf`
+- [ ] 2. 在配置文件中添加 Redis 配置
+- [ ] 3. 在 `ServiceContext` 中初始化 `redis.MustNewRedis(c.Redis)`
+- [ ] 4. 在 Kubernetes 中配置环境变量 `REDIS_PASSWORD`
+- [ ] 5. 在 logic 中使用 `l.svcCtx.Redis`
+- [ ] 6. 测试连接是否正常
+
+### 生产环境推荐配置
+
+```yaml
+Redis:
+ Host: user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+ Type: sentinel
+ Pass: ${REDIS_PASSWORD}
+ MasterName: mymaster
+ MaxIdle: 16
+ MaxActive: 100
+ ConnectTimeout: 5000
+ ReadTimeout: 3000
+ WriteTimeout: 3000
+```
+
+### 关键点提醒
+
+1. ✅ **生产环境必须使用 Sentinel 模式**
+2. ✅ **密码通过环境变量传递,不要硬编码**
+3. ✅ **合理设置过期时间,防止缓存雪崩**
+4. ✅ **使用 Pipeline 优化批量操作**
+5. ✅ **实现缓存降级策略,Redis 故障不影响主流程**
+
+---
+
+**文档版本:** 1.0
+**创建日期:** 2026年2月22日
+**维护者:** Backend Team
+**下次审查:** 2026年3月22日
diff --git a/docs/kubernetes-service-explanation.md b/docs/kubernetes-service-explanation.md
new file mode 100644
index 0000000..941e0c9
--- /dev/null
+++ b/docs/kubernetes-service-explanation.md
@@ -0,0 +1,743 @@
+# Redis Kubernetes Service 详细解析
+
+**问题:** 为什么 Redis 有 8 个 Service,但应用配置中只使用 `user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379`?
+
+**日期:** 2026年2月22日
+
+---
+
+## 📋 目录
+
+1. [Service 概览](#service-概览)
+2. [Kubernetes Service 基础](#kubernetes-service-基础)
+3. [8 个 Service 的详细说明](#8-个-service-的详细说明)
+4. [为什么使用哪个 Service](#为什么使用哪个-service)
+5. [Service 创建原理](#service-创建原理)
+6. [网络流量路由](#网络流量路由)
+7. [故障排查](#故障排查)
+
+---
+
+## 📊 Service 概览
+
+### 当前 Redis 的 8 个 Service
+
+```bash
+$ kubectl get svc -n juwan | grep redis
+
+NAME TYPE CLUSTER-IP PORTS
+user-redis ClusterIP 10.103.91.84 6379/TCP,9121/TCP 33m
+user-redis-additional ClusterIP 10.107.228.48 6379/TCP 33m
+user-redis-headless ClusterIP None 6379/TCP 33m
+user-redis-master ClusterIP 10.97.120.76 6379/TCP 33m
+user-redis-replica ClusterIP 10.100.213.103 6379/TCP 33m
+user-redis-sentinel-sentinel ClusterIP 10.105.28.231 26379/TCP 32m
+user-redis-sentinel-sentinel-additional ClusterIP 10.97.111.42 26379/TCP 32m
+user-redis-sentinel-sentinel-headless ClusterIP None 26379/TCP 32m
+```
+
+### 按功能分类
+
+| 分类 | Service 名称 | 作用 |
+|-----|-------------|------|
+| **Redis 数据层** | user-redis | 通用入口 |
+| | user-redis-additional | 备用入口 |
+| | user-redis-master | 主节点专用 |
+| | user-redis-replica | 从节点专用 |
+| | user-redis-headless | Pod 间通信 |
+| **Sentinel 监控层** | user-redis-sentinel-sentinel | Sentinel 入口 ⭐ |
+| | user-redis-sentinel-sentinel-additional | 备用入口 |
+| | user-redis-sentinel-sentinel-headless | Sentinel 间通信 |
+
+---
+
+## 🔷 Kubernetes Service 基础
+
+### Service 的作用
+
+**Kubernetes 中的 Service 是什么?**
+
+```
+┌─────────────────────────────────────────────────┐
+│ Kubernetes Cluster │
+│ │
+│ Service (虚拟 IP + DNS) │
+│ ↓ │
+│ Endpoints (实际 Pod IP 列表) │
+│ ├─ 10.244.0.10:6379 (Pod 1) │
+│ ├─ 10.244.1.20:6379 (Pod 2) │
+│ └─ 10.244.2.30:6379 (Pod 3) │
+│ │
+│ 客户端 ──→ Service IP (稳定) ──→ Pod IP (变化) │
+└─────────────────────────────────────────────────┘
+```
+
+### Service 的三种类型
+
+| 类型 | CLUSTER-IP | 用途 | 示例 |
+|-----|-----------|------|------|
+| **ClusterIP** | ✅ 有 | 集群内访问 | 10.103.91.84 |
+| **ClusterIP (Headless)** | ❌ None | Pod 间直接通信 | None |
+| **NodePort** | ✅ 有 | 集群外访问 | NodeIP:30080 |
+
+---
+
+## 🔍 8 个 Service 的详细说明
+
+### 第一组:Redis 数据层 Service(端口 6379)
+
+#### 1️⃣ user-redis(ClusterIP)
+
+**基本信息:**
+```yaml
+名称: user-redis
+类型: ClusterIP (有负载均衡)
+Cluster IP: 10.103.91.84
+端口: 6379/TCP, 9121/TCP
+DNS: user-redis.juwan.svc.cluster.local
+```
+
+**Endpoints 信息:**
+```bash
+$ kubectl get endpoints user-redis -n juwan
+
+NAME ENDPOINTS
+user-redis 10.244.0.10:6379,10.244.1.20:6379,10.244.2.30:6379
+```
+
+**负载均衡机制:**
+```
+客户端请求 ──→ Service IP (10.103.91.84)
+ ↓
+ kube-proxy (iptables/ipvs)
+ ↓
+ 随机选择一个 Pod
+ ├─ 10.244.0.10 (redis-0)
+ ├─ 10.244.1.20 (redis-1) ← 可能
+ └─ 10.244.2.30 (redis-2)
+```
+
+**特点:**
+- ✅ 对所有 Pod 轮询负载均衡
+- ✅ 包含 Redis 数据服务(6379)和 Exporter(9121)
+- ⚠️ 可能把写请求轮询到从节点导致失败
+
+**适用场景:**
+- 监控抓取(Prometheus 从 9121 端口抓指标)
+- 不关心读写分离的简单查询
+
+**为什么有 2 个端口?**
+```
+6379: Redis 数据服务
+9121: Prometheus Exporter 监控端口
+ └─ 暴露 Redis 性能指标给 Prometheus
+ (redis_up, redis_memory_used, etc.)
+```
+
+**不用这个的原因:**
+```
+❌ 如果直接使用 user-redis 进行读写:
+ ├─ 写请求可能被路由到从节点 (error)
+ ├─ 无法进行故障自动转移
+ └─ 依赖于手动更新配置
+```
+
+---
+
+#### 2️⃣ user-redis-additional(ClusterIP)
+
+**基本信息:**
+```yaml
+名称: user-redis-additional
+类型: ClusterIP (有负载均衡)
+Cluster IP: 10.107.228.48
+端口: 6379/TCP
+Endpoints: 同 user-redis
+```
+
+**作用:**
+- 功能完全同 `user-redis`
+- 提供额外的访问入口
+- 用于多租户/网络隔离场景
+
+**为什么有这个?**
+```
+场景:某些网络策略可能只允许访问特定 Service
+└─ 额外的 Service 提供备用入口
+```
+
+**不常用的原因:**
+- 大多数场景用 `user-redis` 就足够
+- `user-redis-additional` 是备用
+
+---
+
+#### 3️⃣ user-redis-headless(ClusterIP: None)
+
+**基本信息:**
+```yaml
+名称: user-redis-headless
+类型: ClusterIP (Headless Service)
+Cluster IP: None ← 关键:无虚拟 IP
+端口: 6379/TCP
+DNS: user-redis-headless.juwan.svc.cluster.local
+```
+
+**特殊之处:无虚拟 IP**
+
+```bash
+# 正常 Service 查询返回虚拟 IP
+$ nslookup user-redis.juwan.svc.cluster.local
+Name: user-redis.juwan.svc.cluster.local
+Address: 10.103.91.84 ← 虚拟 IP
+
+# Headless Service 查询返回所有 Pod IP
+$ nslookup user-redis-headless.juwan.svc.cluster.local
+Name: user-redis-headless.juwan.svc.cluster.local
+Address: 10.244.0.10 ← Pod 1 实际 IP
+Address: 10.244.1.20 ← Pod 2 实际 IP
+Address: 10.244.2.30 ← Pod 3 实际 IP
+```
+
+**使用场景:**
+
+```
+┌─────────────────────────────────────────────────┐
+│ StatefulSet (Redis Cluster/Replication) │
+│ │
+│ redis-0 (主) redis-1 (从) redis-2 (从) │
+│ ↓ ↓ ↓ │
+│ 10.244.0.10 10.244.1.20 10.244.2.30 │
+│ ↑ │
+│ 需要直接连接到特定 Pod: │
+│ redis-0.user-redis-headless (连接主节点) │
+│ redis-1.user-redis-headless (连接从节点) │
+└─────────────────────────────────────────────────┘
+```
+
+**谁在使用?**
+- Redis 主从复制:从节点需要连接到已知的主节点
+- Sentinel 监控:需要直接访问特定 Redis 实例
+- Redis Operator 内部使用
+
+**为什么应用不用这个?**
+```
+❌ Pod DNS 只能在 Pod 内使用
+ └─ 外部应用不知道 Pod 的具体 DNS 名称
+
+✅ 用虚拟 Service IP 的优势
+ └─ 无需关心底层 Pod 变化
+```
+
+---
+
+#### 4️⃣ user-redis-master(ClusterIP)
+
+**基本信息:**
+```yaml
+名称: user-redis-master
+类型: ClusterIP
+Cluster IP: 10.97.120.76
+端口: 6379/TCP
+Endpoints: 10.244.0.10:6379 (只有 1 个 Pod)
+DNS: user-redis-master.juwan.svc.cluster.local
+```
+
+**特点:只指向主节点**
+
+```bash
+$ kubectl get endpoints user-redis-master -n juwan
+
+NAME ENDPOINTS
+user-redis-master 10.244.0.10:6379 ← 仅主节点
+```
+
+**对比所有 Endpoints:**
+```
+user-redis-master: 10.244.0.10 (主)
+user-redis-replica: 10.244.1.20, 10.244.2.30 (从)
+user-redis: 所有 Pod
+```
+
+**为什么分开?**
+```
+┌─────────────────────────────────────────┐
+│ Redis 主从架构 │
+│ │
+│ Redis Master (10.244.0.10) │
+│ ├─ 处理所有写操作 │
+│ └─ 复制数据给 Slave │
+│ │
+│ Redis Slave 1 (10.244.1.20) │
+│ └─ 处理只读操作 │
+│ │
+│ Redis Slave 2 (10.244.2.30) │
+│ └─ 处理只读操作 │
+└─────────────────────────────────────────┘
+
+请求分类:
+┌───────────────────────┐
+│ SET key value │ ──→ user-redis-master (10.97.120.76)
+│ HSET user:1 name john │
+└───────────────────────┘
+
+┌───────────────────────┐
+│ GET key │ ──→ user-redis-replica (10.100.213.103)
+│ HGET user:1 name │
+└───────────────────────┘
+```
+
+**适用场景:**
+- ✅ 读写分离架构
+- ✅ 优化读性能(从节点处理读)
+- ✅ 减轻主节点负担
+
+**为什么应用通常不直接用?**
+```
+❌ 需要在应用层面区分读写操作
+ ├─ 写操作 → user-redis-master
+ ├─ 只读操作 → user-redis-replica
+ └─ 代码复杂度高
+
+✅ Sentinel 模式自动处理
+ └─ 应用无需关心主从区别
+```
+
+---
+
+#### 5️⃣ user-redis-replica(ClusterIP)
+
+**基本信息:**
+```yaml
+名称: user-redis-replica
+类型: ClusterIP
+Cluster IP: 10.100.213.103
+端口: 6379/TCP
+Endpoints: 10.244.1.20:6379, 10.244.2.30:6379 (两个从节点)
+DNS: user-redis-replica.juwan.svc.cluster.local
+```
+
+**特点:只指向从节点,支持负载均衡**
+
+```bash
+$ kubectl get endpoints user-redis-replica -n juwan
+
+NAME ENDPOINTS
+user-redis-replica 10.244.1.20:6379, 10.244.2.30:6379
+```
+
+**读流量分散:**
+```
+应用发送 GET 请求
+ ↓
+ user-redis-replica (10.100.213.103)
+ ↓
+ 随机选择一个从节点
+ ├─ 10.244.1.20 (redis-1) ← 可能
+ └─ 10.244.2.30 (redis-2) ← 可能
+```
+
+**适用场景:**
+- 除了 Sentinel 模式外的读优化
+- 需要手动管理读写分离
+
+---
+
+### 第二组:Sentinel 监控层 Service(端口 26379)
+
+#### 6️⃣ user-redis-sentinel-sentinel(ClusterIP)⭐⭐⭐
+
+**基本信息:**
+```yaml
+名称: user-redis-sentinel-sentinel
+类型: ClusterIP
+Cluster IP: 10.105.28.231
+端口: 26379/TCP
+Endpoints: 10.244.0.50:26379, 10.244.1.70:26379, 10.244.2.90:26379
+ (3 个 Sentinel 实例)
+DNS: user-redis-sentinel-sentinel.juwan.svc.cluster.local
+```
+
+**为什么应用使用这个?**
+
+```
+应用程序配置:
+┌──────────────────────────────────────────────┐
+│ Redis: │
+│ Host: user-redis-sentinel-sentinel │
+│ Port: 26379 │
+│ Type: sentinel │
+│ MasterName: mymaster │
+└──────────────────────────────────────────────┘
+
+连接流程:
+┌─────────────────────────────────────────────┐
+│ 应用程序 │
+└────────────────────┬────────────────────────┘
+ │
+ ↓
+┌─────────────────────────────────────────────┐
+│ user-redis-sentinel-sentinel (26379) │
+│ ├─ Sentinel 1: 10.244.0.50:26379 │
+│ ├─ Sentinel 2: 10.244.1.70:26379 │
+│ └─ Sentinel 3: 10.244.2.90:26379 │
+└────────────────────┬────────────────────────┘
+ │
+ 应用询问: "mymaster 在哪?"
+ ↓
+ Sentinel 回答: "在 10.244.0.10:6379"
+ ↓
+┌─────────────────────────────────────────────┐
+│ Redis Master: 10.244.0.10:6379 │
+│ (应用直接连接进行读写) │
+└─────────────────────────────────────────────┘
+
+故障转移过程:
+Master 故障 → Sentinel 检测 → 提升新主节点
+ → 应用下次查询时 → 获得新主节点 IP
+ → 自动连接新主节点
+```
+
+**为什么这是最佳选择?**
+
+1. **自动故障转移**
+ ```
+ 主节点宕机 (✗) → Sentinel 自动选举新主 → 应用自动连接
+ ```
+
+2. **高可用**
+ ```
+ Sentinel 集群(3 个) → 任意 1-2 个故障仍可用
+ ```
+
+3. **应用无感知**
+ ```
+ 应用只需配置 MasterName: mymaster
+ 无需关心主从地址变化
+ ```
+
+4. **标准做法**
+ ```
+ ✅ 业界公认的 Redis 高可用方案
+ ✅ 最小化应用改动
+ ✅ 自动化程度最高
+ ```
+
+**为什么不用其他 Service?**
+
+```
+❌ user-redis-master/user-redis-replica
+ └─ 需要应用层区分读写,主从切换需要重启应用
+
+❌ user-redis/user-redis-additional
+ └─ 没有故障转移能力,故障时应用会报错
+
+✅ user-redis-sentinel-sentinel
+ └─ 自动发现新主节点,无需重启应用
+```
+
+---
+
+#### 7️⃣ user-redis-sentinel-sentinel-additional(ClusterIP)
+
+**说明:** 功能同 `user-redis-sentinel-sentinel`,备用入口
+
+---
+
+#### 8️⃣ user-redis-sentinel-sentinel-headless(ClusterIP: None)
+
+**说明:** 供 Sentinel 内部通信和选举使用
+
+---
+
+## 🎯 为什么使用哪个 Service
+
+### 应用配置选择
+
+#### ⭐⭐⭐ Sentinel 模式(生产推荐)
+
+```yaml
+# 应用配置
+Redis:
+ Host: user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+ Type: sentinel
+ MasterName: mymaster
+ Pass: ${REDIS_PASSWORD}
+```
+
+**优势:**
+- ✅ 自动故障转移(RTO < 30 秒)
+- ✅ 应用无需重启
+- ✅ 自动发现新主节点
+- ✅ 生产标准做法
+
+---
+
+#### ⭐⭐ 主从分离模式(可选)
+
+```yaml
+# 应用配置(需要两个 host)
+Redis:
+ Master:
+ Host: user-redis-master.juwan.svc.cluster.local:6379
+ Slave:
+ Host: user-redis-replica.juwan.svc.cluster.local:6379
+```
+
+**适用场景:**
+- 读写分离显著
+- 对读性能有极高要求
+
+**缺点:**
+- 主从故障需手动切换
+- 应用层复杂度高
+
+---
+
+#### ❌ 不推荐的做法
+
+```yaml
+# ❌ 直接连接单个节点
+Redis:
+ Host: user-redis-0.user-redis-headless.juwan.svc.cluster.local:6379
+ # 问题:Pod 重启 IP 变化,需要更新配置
+
+# ❌ 连接通用 Service(无故障转移)
+Redis:
+ Host: user-redis.juwan.svc.cluster.local:6379
+ # 问题:无法自动转移,故障时应用报错
+
+# ❌ 硬编码 Pod IP
+Redis:
+ Host: 10.244.0.10:6379
+ # 问题:Pod 重启 IP 变化,应用立即不可用
+```
+
+---
+
+## 🔌 Service 创建原理
+
+### 为什么会自动创建这么多 Service?
+
+**由 Redis Operator 自动创建:**
+
+```go
+// Redis Operator 逻辑(伪代码)
+func CreateServicesForRedis(redis *RedisReplication) {
+ // 数据层 Service
+ CreateService("user-redis", AllRedisNodes)
+ CreateService("user-redis-additional", AllRedisNodes)
+ CreateService("user-redis-master", [MasterNode])
+ CreateService("user-redis-replica", [SlaveNodes])
+ CreateHeadlessService("user-redis-headless", AllRedisNodes)
+
+ // 监控层 Service
+ CreateService("user-redis-sentinel-sentinel", AllSentinelNodes)
+ CreateService("user-redis-sentinel-sentinel-additional", AllSentinelNodes)
+ CreateHeadlessService("user-redis-sentinel-sentinel-headless", AllSentinelNodes)
+}
+```
+
+**为什么这样设计?**
+
+| Service | 原因 |
+|---------|------|
+| 多个 ClusterIP | 不同场景需要不同的 Endpoints 配置 |
+| 包含 additional | 网络隔离/多租户支持 |
+| 包含 headless | StatefulSet 需要 Pod 间直接通信 |
+
+**类比:**
+```
+Redis Operator 就像一个完整的产品
+└─ 提供多种方式使用 Redis
+ ├─ 简单: user-redis
+ ├─ 高级: user-redis-master/replica
+ ├─ HA: user-redis-sentinel-sentinel
+ └─ 内部: headless services
+```
+
+---
+
+## 🌐 网络流量路由
+
+### 查询 Service 背后的 Pod
+
+**查看 Service Endpoints:**
+
+```bash
+# 查看 user-redis 关联的 Pod
+$ kubectl get endpoints user-redis -n juwan
+NAME ENDPOINTS
+user-redis 10.244.0.10:6379,10.244.1.20:6379,10.244.2.30:6379
+
+# 查看 user-redis-master 关联的 Pod
+$ kubectl get endpoints user-redis-master -n juwan
+NAME ENDPOINTS
+user-redis-master 10.244.0.10:6379
+
+# 查看 user-redis-replica 关联的 Pod
+$ kubectl get endpoints user-redis-replica -n juwan
+NAME ENDPOINTS
+user-redis-replica 10.244.1.20:6379,10.244.2.30:6379
+```
+
+**Pod 和 Service 的映射关系:**
+
+```
+Pods (实际运行的实例) Services (虚拟 IP)
+└─ redis-0 (主) └─ user-redis (所有)
+ ├─ 10.244.0.10 ├─ 10.103.91.84
+ └─ :6379
+ └─ user-redis-master (仅主)
+└─ redis-1 (从) ├─ 10.97.120.76
+ ├─ 10.244.1.20
+ └─ :6379
+ └─ user-redis-replica (仅从)
+└─ redis-2 (从) ├─ 10.100.213.103
+ ├─ 10.244.2.30
+ └─ :6379
+```
+
+**DNS 解析过程:**
+
+```
+应用 DNS 查询
+ └─ user-redis-master.juwan.svc.cluster.local
+ ↓
+CoreDNS (Kubernetes DNS)
+ └─ 查询并返回 Service IP:
+ ├─ 10.97.120.76 (user-redis-master)
+ ├─ 或 10.100.213.103 (user-redis-replica)
+ ├─ 或 10.103.91.84 (user-redis)
+ └─ 或 Sentinel 的 IP
+```
+
+**Sentinel 模式的特殊之处:**
+
+```
+应用查询 Sentinel
+ └─ user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+ ↓
+Sentinel Service (负载均衡到 3 个 Sentinel 节点)
+ ↓
+Sentinel 节点 (任选一个)
+ ↓
+应用询问: "mymaster 主节点 IP 是什么?"
+ ↓
+Sentinel 回答: "10.244.0.10:6379"
+ ↓
+应用直接连接 Redis Master: 10.244.0.10:6379
+```
+
+---
+
+## 🔧 故障排查
+
+### 问题 1:为什么应用连接失败?
+
+**检查步骤:**
+
+```bash
+# 1. 验证 Service 存在
+kubectl get svc user-redis-sentinel-sentinel -n juwan
+
+# 2. 验证 Endpoints 不为空
+kubectl get endpoints user-redis-sentinel-sentinel -n juwan
+
+# 3. 测试 DNS 解析
+kubectl run -it --rm nettest --image=busybox --restart=Never -n juwan -- \
+ nslookup user-redis-sentinel-sentinel.juwan.svc.cluster.local
+
+# 4. 测试连接性
+kubectl run -it --rm nettest --image=busybox --restart=Never -n juwan -- \
+ nc -zv user-redis-sentinel-sentinel.juwan.svc.cluster.local 26379
+
+# 5. 查看应用日志
+kubectl logs -f user-rpc-xxx -n juwan
+```
+
+### 问题 2:为什么看不到某个 Service?
+
+```bash
+# 确保在正确的命名空间
+kubectl get svc -n juwan | grep redis
+
+# 如果 Redis Operator 有问题,Service 可能不会创建
+# 查看 Operator 日志
+kubectl logs -n default deployment/redis-operator
+```
+
+### 问题 3:Service IP 经常变化?
+
+```bash
+# Service IP 是稳定的(除非被删除和重建)
+# 如果频繁变化,说明 Service 被频繁重建
+
+# 检查 Service 创建事件
+kubectl describe svc user-redis-sentinel-sentinel -n juwan
+
+# 检查 Operator 是否有异常
+kubectl describe redissentinel user-redis-sentinel -n juwan
+```
+
+---
+
+## 📚 总结
+
+### 快速理解
+
+| Service | 用途 | 应用是否使用 |
+|---------|------|-----------|
+| **user-redis-sentinel-sentinel** | ⭐ Sentinel 高可用 | ✅ **生产推荐** |
+| user-redis-master | 直连主节点 | ⚠️ 需要读写分离 |
+| user-redis-replica | 直连从节点 | ⚠️ 需要读写分离 |
+| user-redis | 通用入口 | ❌ 不推荐(无 HA) |
+| headless services | 内部通信 | ❌ 应用不用 |
+
+### 为什么有这么多 Service?
+
+**答案:** 为了提供灵活的使用方式
+
+```
+Redis Operator 的设计理念:
+┌─────────────────────────────────────────┐
+│ 提供完整的 Redis 高可用解决方案 │
+│ │
+│ ├─ 简单使用场景 │
+│ │ └─ user-redis (所有节点) │
+│ │ │
+│ ├─ 高级使用场景 │
+│ │ ├─ user-redis-master (写) │
+│ │ └─ user-redis-replica (读) │
+│ │ │
+│ ├─ 生产场景 (推荐) │
+│ │ └─ user-redis-sentinel-sentinel │
+│ │ │
+│ └─ 内部通信 │
+│ └─ headless services │
+└─────────────────────────────────────────┘
+```
+
+### 应用该用哪个?
+
+**一句话:使用 `user-redis-sentinel-sentinel:26379` + Sentinel 模式**
+
+```yaml
+# 这是最佳实践
+Redis:
+ Host: user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+ Type: sentinel
+ MasterName: mymaster
+```
+
+**为什么?**
+- ✅ 自动故障转移
+- ✅ 应用无需重启
+- ✅ 无需手工干预
+- ✅ 行业标准
+
+---
+
+**文档版本:** 1.0
+**创建日期:** 2026年2月22日
+**维护者:** DevOps Team
diff --git a/docs/redis-sentinel-troubleshooting.md b/docs/redis-sentinel-troubleshooting.md
new file mode 100644
index 0000000..2a555d9
--- /dev/null
+++ b/docs/redis-sentinel-troubleshooting.md
@@ -0,0 +1,779 @@
+# Redis Sentinel 部署问题诊断与修复报告
+
+**问题日期:** 2026年2月22日
+**命名空间:** juwan
+**涉及资源:** user-rpc deployment, RedisSentinel
+
+---
+
+## 📋 目录
+
+1. [问题背景](#问题背景)
+2. [问题现象](#问题现象)
+3. [诊断过程](#诊断过程)
+4. [根因分析](#根因分析)
+5. [解决方案](#解决方案)
+6. [修复步骤](#修复步骤)
+7. [验证结果](#验证结果)
+8. [后续建议](#后续建议)
+
+---
+
+## 🎯 问题背景
+
+### 部署目标
+部署一个简单的三节点 Redis Sentinel 哨兵集群作为缓存服务,供 user-rpc 服务使用。后续如有需要再扩展为分片集群。
+
+### 初始配置
+在 `deploy/k8s/service/user/user-rpc.yaml` 中配置了:
+- user-rpc Deployment(3副本)
+- user-rpc Service
+- HPA(CPU和内存)
+- **RedisSentinel 资源**
+- PostgreSQL Cluster
+
+---
+
+## 🔴 问题现象
+
+### 执行的操作
+```bash
+kubectl apply -f .\deploy\k8s\service\user\user-rpc.yaml
+```
+
+### 输出结果
+```
+deployment.apps/user-rpc configured
+service/user-rpc-svc unchanged
+horizontalpodautoscaler.autoscaling/user-rpc-hpa-c unchanged
+horizontalpodautoscaler.autoscaling/user-rpc-hpa-m unchanged
+redissentinel.redis.redis.opstreelabs.in/user-redis unchanged
+cluster.postgresql.cnpg.io/user-db unchanged
+```
+
+### 观察到的异常
+查看命名空间资源:
+```bash
+kubectl get all -n juwan
+```
+
+**发现:**
+- ✅ user-api pods 正常运行
+- ✅ user-rpc pods 正常运行
+- ✅ PostgreSQL clusters 正常运行
+- ❌ **没有任何 Redis 相关的 Pod**
+- ❌ **没有 Redis Service**
+
+---
+
+## 🔍 诊断过程
+
+### 步骤 1:检查 RedisSentinel 资源状态
+
+**目的:** 确认 RedisSentinel 资源是否被成功创建
+
+**命令:**
+```bash
+kubectl get redissentinel user-redis -n juwan
+```
+
+**输出:**
+```
+NAME AGE
+user-redis 9m56s
+```
+
+**分析:**
+- ✅ RedisSentinel 资源已创建
+- ❌ 但没有创建任何 Pod
+- **结论:** Operator 没有按照 RedisSentinel 规格创建实际资源
+
+---
+
+### 步骤 2:查看 RedisSentinel 详细信息
+
+**目的:** 检查资源的详细配置和事件
+
+**命令:**
+```bash
+kubectl describe redissentinel user-redis -n juwan
+```
+
+**关键输出:**
+```yaml
+API Version: redis.redis.opstreelabs.in/v1beta2
+Kind: RedisSentinel
+Metadata:
+ Creation Timestamp: 2026-02-22T12:41:47Z
+ Finalizers:
+ redisSentinelFinalizer
+ Generation: 2
+Spec:
+ Cluster Size: 3
+ Redis Sentinel Config:
+ Redis Replication Name: user-redis # ⚠️ 关键配置
+ Master Group Name: myMaster
+ Redis Port: 6379
+ Quorum: 2
+Events: # ⚠️ 没有任何事件
+```
+
+**分析:**
+- 配置中引用了 `redisReplicationName: user-redis`
+- 没有任何 Events,说明 Operator 可能在等待某些前置条件
+- **需要进一步检查依赖资源**
+
+---
+
+### 步骤 3:检查 Secret 资源
+
+**目的:** 确认 RedisSentinel 所需的密码 Secret 是否存在
+
+**命令:**
+```bash
+kubectl get secret user-redis -n juwan
+```
+
+**输出:**
+```
+NAME TYPE DATA AGE
+user-redis Opaque 1 5h48m
+```
+
+**分析:**
+- ✅ Secret 存在,排除认证配置问题
+
+---
+
+### 步骤 4:查找 Redis Pods
+
+**目的:** 确认是否有 Redis Pod 使用不同的命名规则
+
+**命令:**
+```bash
+kubectl get pods -n juwan | Select-String redis
+```
+
+**输出:**
+```
+(空输出 - 没有找到任何 Redis Pod)
+```
+
+**分析:**
+- ❌ 确认没有任何 Redis Pod 被创建
+
+---
+
+### 步骤 5:检查 StatefulSet
+
+**目的:** Redis 通常使用 StatefulSet 部署,检查是否有相关资源
+
+**命令:**
+```bash
+kubectl get statefulset -n juwan
+```
+
+**输出:**
+```
+No resources found in juwan namespace.
+```
+
+**分析:**
+- ❌ 没有 StatefulSet 被创建
+- **结论:** Operator 完全没有执行创建动作
+
+---
+
+### 步骤 6:检查 Redis Operator 状态
+
+**目的:** 确认 Redis Operator 本身是否正常运行
+
+**命令:**
+```bash
+kubectl get pods -A | Select-String redis-operator
+```
+
+**输出:**
+```
+default redis-operator-7dcf4468c9-gfbcm 1/1 Running 0 4h50m
+ot-operators redis-operator-69bb645f5b-6kxkv 0/1 ErrImageNeverPull 0 4h48m
+```
+
+**分析:**
+- ✅ default namespace 中的 Operator 正常运行
+- ⚠️ ot-operators namespace 中的 Operator 镜像拉取失败(但不影响我们的场景)
+
+---
+
+### 步骤 7:查看 Operator 日志
+
+**目的:** 从 Operator 日志中寻找线索
+
+**命令:**
+```bash
+kubectl logs redis-operator-7dcf4468c9-gfbcm -n default --tail=50
+```
+
+**关键输出:**
+```json
+{"level":"info","ts":"2026-02-22T08:01:56Z","msg":"Starting Controller","controller":"redissentinel"}
+{"level":"info","ts":"2026-02-22T08:01:56Z","msg":"Starting workers","controller":"redissentinel","worker count":1}
+```
+
+**分析:**
+- ✅ RedisSentinel Controller 已启动
+- ✅ 没有错误日志
+- ❌ 但也没有处理 user-redis 资源的日志
+- **推测:** Operator 在等待某个依赖资源
+
+---
+
+### 步骤 8:检查 RedisReplication 资源(关键发现)
+
+**目的:** 根据 RedisSentinel 配置中的 `redisReplicationName: user-redis`,检查对应的 RedisReplication 是否存在
+
+**命令:**
+```bash
+kubectl get redisreplication -n juwan
+```
+
+**输出:**
+```
+No resources found in juwan namespace.
+```
+
+**分析:**
+- ❌ **RedisReplication 资源不存在!**
+- 🔎 **这就是问题的根本原因**
+
+---
+
+## 💡 根因分析
+
+### 问题根源
+
+**RedisSentinel 依赖 RedisReplication,但配置中只创建了 RedisSentinel,没有创建 RedisReplication。**
+
+### Redis Operator 架构理解
+
+在 OpsTree Redis Operator 中,资源之间的关系如下:
+
+```
+┌─────────────────────────────────────────┐
+│ RedisSentinel (哨兵层) │
+│ - 3个 Sentinel 节点 │
+│ - 负责监控和自动故障转移 │
+│ - 引用: redisReplicationName │
+└──────────────┬──────────────────────────┘
+ │ 监控
+ ↓
+┌─────────────────────────────────────────┐
+│ RedisReplication (数据层) │
+│ - 1个 Master + N个 Replica │
+│ - 提供实际的缓存服务 │
+│ - 主从复制 │
+└─────────────────────────────────────────┘
+```
+
+### 错误配置的问题
+
+原始配置直接创建了 RedisSentinel,但:
+
+1. **缺少被监控对象:** Sentinel 需要监控一个 RedisReplication 集群
+2. **引用不存在的资源:** `redisReplicationName: user-redis` 指向一个不存在的 RedisReplication
+3. **Operator 行为:** Operator 发现依赖的 RedisReplication 不存在,因此不会创建 Sentinel Pod
+
+### 为什么没有错误提示?
+
+- CRD 验证只检查语法和字段类型
+- 资源引用关系由 Operator 运行时检查
+- Operator 采用了"等待依赖"策略,而不是报错
+
+---
+
+## ✅ 解决方案
+
+### 正确的部署顺序
+
+1. **先创建 RedisReplication**(建立 Redis 主从复制集群)
+2. **再创建 RedisSentinel**(监控上述复制集群)
+
+### 配置结构
+
+```yaml
+# 第一步:创建 Redis 主从复制(数据层)
+apiVersion: redis.redis.opstreelabs.in/v1beta2
+kind: RedisReplication
+metadata:
+ name: user-redis # Sentinel 将引用这个名称
+ namespace: juwan
+spec:
+ clusterSize: 3 # 1 Master + 2 Replicas
+ kubernetesConfig:
+ image: quay.io/opstree/redis:v7.0.12
+ resources:
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ limits:
+ cpu: 500m
+ memory: 512Mi
+ redisSecret:
+ name: user-redis
+ key: password
+ storage:
+ volumeClaimTemplate:
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 1Gi # 每个 Redis 节点 1GB 存储
+
+---
+# 第二步:创建 Sentinel 监控(监控层)
+apiVersion: redis.redis.opstreelabs.in/v1beta2
+kind: RedisSentinel
+metadata:
+ name: user-redis-sentinel # 使用不同的名称避免混淆
+ namespace: juwan
+spec:
+ clusterSize: 3 # 3个 Sentinel 节点(推荐奇数)
+ kubernetesConfig:
+ image: quay.io/opstree/redis-sentinel:v7.0.12 # 使用 Sentinel 专用镜像
+ redisSentinelConfig:
+ redisReplicationName: user-redis # 引用上面的 RedisReplication
+ masterGroupName: mymaster
+ quorum: "2" # 需要 2 个 Sentinel 同意才能进行故障转移
+```
+
+---
+
+## 🔧 修复步骤
+
+### 步骤 1:删除错误的 RedisSentinel 资源
+
+**命令:**
+```bash
+kubectl delete redissentinel user-redis -n juwan
+```
+
+**输出:**
+```
+redissentinel.redis.redis.opstreelabs.in "user-redis" deleted
+```
+
+**说明:** 删除仅创建了 CRD 实例但未创建实际 Pod 的资源
+
+---
+
+### 步骤 2:更新配置文件
+
+修改 `deploy/k8s/service/user/user-rpc.yaml`,将单独的 RedisSentinel 替换为:
+1. RedisReplication(数据层)
+2. RedisSentinel(监控层)
+
+**变更内容:**
+- 添加 `RedisReplication` 资源定义
+- 添加 `storage.volumeClaimTemplate` 配置
+- 修改 RedisSentinel 的 `metadata.name` 为 `user-redis-sentinel`
+- 使用正确的 Sentinel 镜像:`quay.io/opstree/redis-sentinel:v7.0.12`
+- 完善 Sentinel 配置参数
+
+---
+
+### 步骤 3:应用更新后的配置
+
+**命令:**
+```bash
+kubectl apply -f .\deploy\k8s\service\user\user-rpc.yaml
+```
+
+**输出:**
+```
+deployment.apps/user-rpc configured
+service/user-rpc-svc unchanged
+horizontalpodautoscaler.autoscaling/user-rpc-hpa-c unchanged
+horizontalpodautoscaler.autoscaling/user-rpc-hpa-m unchanged
+redisreplication.redis.redis.opstreelabs.in/user-redis created ✅
+redissentinel.redis.redis.opstreelabs.in/user-redis-sentinel created ✅
+cluster.postgresql.cnpg.io/user-db unchanged
+```
+
+**分析:**
+- ✅ RedisReplication 成功创建
+- ✅ RedisSentinel 成功创建
+- 🎯 两个资源都是新创建(created),符合预期
+
+---
+
+## ✅ 验证结果
+
+### 验证 1:检查 Pod 创建情况(等待 30 秒)
+
+**命令:**
+```bash
+kubectl get statefulset,pods -n juwan | Select-String -Pattern "user-redis|NAME"
+```
+
+**输出:**
+```
+NAME READY AGE
+statefulset.apps/user-redis 3/3 81s ✅
+statefulset.apps/user-redis-sentinel-sentinel 3/3 24s ✅
+
+NAME READY STATUS RESTARTS AGE
+pod/user-redis-0 2/2 Running 0 80s ✅
+pod/user-redis-1 2/2 Running 0 52s ✅
+pod/user-redis-2 2/2 Running 0 47s ✅
+pod/user-redis-sentinel-sentinel-0 1/1 Running 0 24s ✅
+pod/user-redis-sentinel-sentinel-1 1/1 Running 0 8s ✅
+pod/user-redis-sentinel-sentinel-2 1/1 Running 0 5s ✅
+```
+
+**分析:**
+- ✅ **RedisReplication** 创建了 3 个 Pod(user-redis-0/1/2)
+ - 每个 Pod 有 2 个容器(2/2):Redis + Exporter
+ - 所有 Pod 处于 Running 状态
+- ✅ **RedisSentinel** 创建了 3 个 Pod(user-redis-sentinel-sentinel-0/1/2)
+ - 每个 Pod 有 1 个容器(1/1):Sentinel
+ - 所有 Pod 处于 Running 状态
+- ✅ 创建了 2 个 StatefulSet,READY 状态为 3/3
+
+---
+
+### 验证 2:检查 Service 资源
+
+**命令:**
+```bash
+kubectl get svc -n juwan | Select-String -Pattern "redis|NAME"
+```
+
+**输出:**
+```
+NAME TYPE CLUSTER-IP PORT(S) AGE
+user-redis ClusterIP 10.103.91.84 6379/TCP,9121/TCP 95s ✅
+user-redis-additional ClusterIP 10.107.228.48 6379/TCP 95s
+user-redis-headless ClusterIP None 6379/TCP 95s ✅
+user-redis-master ClusterIP 10.97.120.76 6379/TCP 95s ✅
+user-redis-replica ClusterIP 10.100.213.103 6379/TCP 95s ✅
+user-redis-sentinel-sentinel ClusterIP 10.105.28.231 26379/TCP 40s ✅
+user-redis-sentinel-sentinel-additional ClusterIP 10.97.111.42 26379/TCP 39s
+user-redis-sentinel-sentinel-headless ClusterIP None 26379/TCP 41s
+```
+
+**Service 功能说明:**
+
+#### Redis 数据层 Service(端口 6379)
+- **user-redis-master**: 主节点服务,用于写操作
+- **user-redis-replica**: 从节点服务,用于读操作
+- **user-redis**: 通用访问入口(负载均衡到所有节点)
+- **user-redis-headless**: 无头服务,用于 StatefulSet Pod 间通信
+- **user-redis-additional**: 额外的访问入口
+
+#### Sentinel 监控层 Service(端口 26379)
+- **user-redis-sentinel-sentinel**: Sentinel 访问入口
+- **user-redis-sentinel-sentinel-headless**: Sentinel 节点间通信
+- **user-redis-sentinel-sentinel-additional**: 额外的 Sentinel 访问入口
+
+---
+
+### 验证 3:检查完整的集群状态
+
+**命令:**
+```bash
+kubectl get all -n juwan
+```
+
+**最终状态统计:**
+
+| 资源类型 | 名称 | 数量 | 状态 |
+|---------|------|------|------|
+| **Deployment** | user-api | 3/3 | ✅ Running |
+| **Deployment** | user-rpc | 3/3 | ✅ Running |
+| **StatefulSet** | cluster-example (PostgreSQL) | 3/3 | ✅ Running |
+| **StatefulSet** | user-db (PostgreSQL) | 3/3 | ✅ Running |
+| **StatefulSet** | user-redis (Redis 数据) | 3/3 | ✅ Running |
+| **StatefulSet** | user-redis-sentinel-sentinel | 3/3 | ✅ Running |
+
+**Pod 总计:** 18 个(全部 Running)
+**Service 总计:** 13 个
+**HPA 总计:** 6 个
+
+---
+
+## 📊 架构图
+
+### 部署后的 Redis 架构
+
+```
+┌────────────────────────────────────────────────────────────┐
+│ 应用层 (user-rpc) │
+│ │
+│ [需要添加 Redis 连接配置] │
+└──────────┬─────────────────────────────┬───────────────────┘
+ │ │
+ │ 写操作 │ 读操作
+ ↓ ↓
+ ┌─────────────┐ ┌─────────────┐
+ │ user-redis- │ │ user-redis- │
+ │ master │ │ replica │
+ │ Service │ │ Service │
+ └─────────────┘ └─────────────┘
+ │ │
+ └──────────┬──────────────────┘
+ ↓
+ ┌──────────────────────────────────────────┐
+ │ RedisReplication (数据层) │
+ │ │
+ │ ┌──────────┐ ┌──────────┐ ┌───────┐ │
+ │ │ Master │→ │ Replica │→ │Replica│ │
+ │ │ redis-0 │ │ redis-1 │ │redis-2│ │
+ │ └──────────┘ └──────────┘ └───────┘ │
+ └──────────────────────────────────────────┘
+ ↑
+ │ 监控 & 故障转移
+ │
+ ┌──────────────────────────────────────────┐
+ │ RedisSentinel (监控层) │
+ │ │
+ │ ┌──────────┐ ┌──────────┐ ┌───────┐ │
+ │ │Sentinel-0│ │Sentinel-1│ │Sentinel-2│
+ │ └──────────┘ └──────────┘ └───────┘ │
+ │ │
+ │ Quorum: 2/3 (多数派决策) │
+ └──────────────────────────────────────────┘
+```
+
+---
+
+## 📝 后续建议
+
+### 1. 应用集成 Redis
+
+user-rpc 服务目前还没有配置 Redis 连接,需要:
+
+#### 修改配置文件 `app/users/rpc/etc/pb.yaml`
+```yaml
+Name: pb.rpc
+ListenOn: 0.0.0.0:8080
+
+# 添加 Redis 配置(使用 Sentinel 模式)
+Redis:
+  Host: user-redis-sentinel-sentinel:26379
+  Type: sentinel
+  MasterName: mymaster
+  Pass: ${REDIS_PASSWORD}
+
+# 或使用主从模式
+# Redis:
+# - Host: user-redis-master:6379 # 写
+# Type: node
+# Pass: ${REDIS_PASSWORD}
+# - Host: user-redis-replica:6379 # 读
+# Type: node
+# Pass: ${REDIS_PASSWORD}
+
+Etcd:
+ Hosts:
+ - etcd-service:2379 # 需要配置实际的 Etcd 地址
+ Key: pb.rpc
+```
+
+#### 修改 Config 结构 `app/users/rpc/internal/config/config.go`
+```go
+package config
+
+import (
+ "github.com/zeromicro/go-zero/core/stores/redis"
+ "github.com/zeromicro/go-zero/zrpc"
+)
+
+type Config struct {
+ zrpc.RpcServerConf
+ Redis redis.RedisConf // 添加 Redis 配置
+}
+```
+
+#### 初始化 Redis 客户端 `app/users/rpc/internal/svc/serviceContext.go`
+```go
+package svc
+
+import (
+ "github.com/zeromicro/go-zero/core/stores/redis"
+ "juwan-backend/app/users/rpc/internal/config"
+)
+
+type ServiceContext struct {
+ Config config.Config
+ Redis *redis.Redis // 添加 Redis 客户端
+}
+
+func NewServiceContext(c config.Config) *ServiceContext {
+ return &ServiceContext{
+ Config: c,
+ Redis: redis.MustNewRedis(c.Redis), // 初始化 Redis
+ }
+}
+```
+
+#### 更新 Deployment 环境变量
+```yaml
+# deploy/k8s/service/user/user-rpc.yaml
+env:
+ - name: DB_URI
+ valueFrom:
+ secretKeyRef:
+ name: user-db-app
+ key: uri
+ - name: REDIS_PASSWORD # 添加 Redis 密码
+ valueFrom:
+ secretKeyRef:
+ name: user-redis
+ key: password
+```
+
+---
+
+### 2. Redis 性能监控
+
+已启用 Redis Exporter(端口 9121),可以配置 Prometheus 监控:
+
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: user-redis-metrics
+ namespace: juwan
+spec:
+ selector:
+ matchLabels:
+ app: user-redis
+ endpoints:
+ - port: redis-exporter
+ interval: 30s
+```
+
+**监控指标:**
+- redis_up: 实例状态
+- redis_connected_clients: 连接数
+- redis_memory_used_bytes: 内存使用
+- redis_commands_processed_total: 命令处理数
+- redis_master_repl_offset: 复制偏移量
+
+---
+
+### 3. 高可用性测试
+
+#### 测试主节点故障转移
+```bash
+# 1. 查找当前主节点
+kubectl exec -it user-redis-sentinel-sentinel-0 -n juwan -- redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster
+
+# 2. 模拟主节点故障
+kubectl delete pod user-redis-0 -n juwan
+
+# 3. 观察 Sentinel 的故障转移过程
+kubectl logs -f user-redis-sentinel-sentinel-0 -n juwan
+
+# 4. 确认新主节点
+kubectl exec -it user-redis-sentinel-sentinel-0 -n juwan -- redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster
+```
+
+#### 预期结果
+- Sentinel 检测到主节点下线(5 秒)
+- 2/3 Sentinel 节点达成共识(quorum=2)
+- 自动提升一个从节点为主节点
+- 客户端自动重连到新主节点
+
+---
+
+### 4. 扩展为分片集群(未来)
+
+当缓存数据量增长需要横向扩展时,可以迁移到 RedisCluster:
+
+```yaml
+apiVersion: redis.redis.opstreelabs.in/v1beta2
+kind: RedisCluster
+metadata:
+ name: user-redis-cluster
+ namespace: juwan
+spec:
+ clusterSize: 6 # 3 主 + 3 从
+ kubernetesConfig:
+ image: quay.io/opstree/redis:v7.0.12
+ redisLeader:
+ replicas: 3
+ redisFollower:
+ replicas: 3
+ storage:
+ volumeClaimTemplate:
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 5Gi
+```
+
+**迁移步骤:**
+1. 部署新的 RedisCluster
+2. 使用 redis-cli --cluster import 迁移数据
+3. 更新应用配置指向新集群
+4. 下线旧的 Sentinel 集群
+
+---
+
+### 5. 备份策略
+
+Redis Operator 不提供自动备份,建议配置定时任务:
+
+```yaml
+# 创建 CronJob 定期执行 BGSAVE
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: redis-backup
+ namespace: juwan
+spec:
+ schedule: "0 2 * * *" # 每天凌晨 2 点
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ containers:
+ - name: backup
+ image: redis:7.0.12
+ command:
+ - /bin/sh
+ - -c
+ - |
+ redis-cli -h user-redis-master -a $REDIS_PASSWORD BGSAVE
+ # 将 /data/dump.rdb 上传到对象存储
+ restartPolicy: OnFailure
+```
+
+---
+
+## 📚 总结
+
+### 关键经验
+
+1. **理解资源依赖关系:** RedisSentinel 依赖 RedisReplication,部署顺序很重要
+2. **资源命名规范:** 使用清晰的名称区分不同层次的资源(如 user-redis 和 user-redis-sentinel)
+3. **诊断思路:**
+ - 从现象(Pod 缺失)→ 资源状态(CRD 存在)→ Operator 日志 → 依赖检查
+ - 逐层排查,最终定位到 RedisReplication 缺失
+4. **验证完整性:** 不仅要检查 Pod,还要验证 Service、StatefulSet 等所有相关资源
+
+### 文档价值
+
+本文档可用于:
+- ✅ 团队知识传承
+- ✅ 类似问题的快速排查手册
+- ✅ 新成员的 Redis Operator 学习资料
+- ✅ 事后复盘和经验总结
+
+---
+
+**最后更新时间:** 2026年2月22日
+**文档状态:** ✅ 问题已解决,Redis 集群运行正常
+**下一步行动:** 配置应用连接 Redis
diff --git a/docs/redis-services-guide.md b/docs/redis-services-guide.md
new file mode 100644
index 0000000..22cbcca
--- /dev/null
+++ b/docs/redis-services-guide.md
@@ -0,0 +1,1179 @@
+# Redis Services 连接指南
+
+**环境:** juwan namespace
+**Redis 版本:** 7.0.12
+**部署模式:** RedisReplication + RedisSentinel
+**文档日期:** 2026年2月22日
+
+---
+
+## 📋 目录
+
+1. [Service 列表总览](#service-列表总览)
+2. [Redis 数据层 Service 详解](#redis-数据层-service-详解)
+3. [Sentinel 监控层 Service 详解](#sentinel-监控层-service-详解)
+4. [管理工具连接方式](#管理工具连接方式)
+5. [应用服务连接方式](#应用服务连接方式)
+6. [连接方式对比](#连接方式对比)
+7. [最佳实践建议](#最佳实践建议)
+8. [故障排查](#故障排查)
+
+---
+
+## 📊 Service 列表总览
+
+当前集群中部署的 Redis 相关 Service:
+
+| Service 名称 | 类型 | Cluster IP | 端口 | 用途 |
+|-------------|------|-----------|------|------|
+| **user-redis** | ClusterIP | 10.103.91.84 | 6379, 9121 | 通用访问 + 监控 |
+| **user-redis-additional** | ClusterIP | 10.107.228.48 | 6379 | 额外访问入口 |
+| **user-redis-headless** | ClusterIP(None) | None | 6379 | Pod 间直接访问 |
+| **user-redis-master** ⭐ | ClusterIP | 10.97.120.76 | 6379 | 主节点访问 |
+| **user-redis-replica** ⭐ | ClusterIP | 10.100.213.103 | 6379 | 从节点访问 |
+| **user-redis-sentinel-sentinel** ⭐⭐⭐ | ClusterIP | 10.105.28.231 | 26379 | Sentinel 访问 |
+| **user-redis-sentinel-sentinel-additional** | ClusterIP | 10.97.111.42 | 26379 | 额外 Sentinel 入口 |
+| **user-redis-sentinel-sentinel-headless** | ClusterIP(None) | None | 26379 | Sentinel 间通信 |
+
+**图例:**
+- ⭐⭐⭐ 生产环境强烈推荐
+- ⭐ 生产环境可用
+- 无标记:特殊场景或内部使用
+
+---
+
+## 🔴 Redis 数据层 Service 详解
+
+### 1. user-redis-master ⭐
+
+**基本信息**
+```yaml
+名称: user-redis-master
+类型: ClusterIP
+IP: 10.97.120.76
+端口: 6379/TCP
+DNS: user-redis-master.juwan.svc.cluster.local
+```
+
+**功能特点**
+- 🎯 自动追踪当前 Redis 主节点
+- ✅ 确保所有写操作到达主节点
+- 🔄 故障转移后自动指向新主节点
+- 💪 提供最强一致性保证
+
+**适用场景**
+- ✅ 所有写操作(SET, HSET, ZADD 等)
+- ✅ 需要强一致性的读操作
+- ✅ 事务操作(MULTI/EXEC)
+- ❌ 不适合高并发读请求
+
+**连接示例**
+```go
+// Go - go-redis
+rdb := redis.NewClient(&redis.Options{
+ Addr: "user-redis-master.juwan.svc.cluster.local:6379",
+ Password: os.Getenv("REDIS_PASSWORD"),
+ DB: 0,
+})
+
+// 写操作
+err := rdb.Set(ctx, "user:1001", "John Doe", 0).Err()
+```
+
+```bash
+# CLI 测试
+kubectl run -it --rm redis-cli --image=redis:7.0.12 --restart=Never -n juwan -- \
+  redis-cli -h user-redis-master -a "$REDIS_PASSWORD" SET test "hello"
+```
+
+---
+
+### 2. user-redis-replica ⭐
+
+**基本信息**
+```yaml
+名称: user-redis-replica
+类型: ClusterIP
+IP: 10.100.213.103
+端口: 6379/TCP
+DNS: user-redis-replica.juwan.svc.cluster.local
+```
+
+**功能特点**
+- 📖 负载均衡到所有从节点(当前 2 个)
+- ⚡ 分散读请求,提升吞吐量
+- 🕐 可能存在轻微的复制延迟(通常 < 100ms)
+- 🚫 只读模式,写操作会失败
+
+**适用场景**
+- ✅ 高并发读请求
+- ✅ 查询操作(GET, HGET, ZRANGE 等)
+- ✅ 统计分析类查询
+- ⚠️ 对数据实时性要求不高的场景
+- ❌ 不能用于写操作
+
+**连接示例**
+```go
+// Go - 读写分离配置
+masterClient := redis.NewClient(&redis.Options{
+ Addr: "user-redis-master.juwan.svc.cluster.local:6379",
+ Password: os.Getenv("REDIS_PASSWORD"),
+})
+
+replicaClient := redis.NewClient(&redis.Options{
+ Addr: "user-redis-replica.juwan.svc.cluster.local:6379",
+ Password: os.Getenv("REDIS_PASSWORD"),
+ ReadOnly: true,
+})
+
+// 写操作用 master
+masterClient.Set(ctx, "counter", 100, 0)
+
+// 读操作用 replica
+val, err := replicaClient.Get(ctx, "counter").Result()
+```
+
+---
+
+### 3. user-redis
+
+**基本信息**
+```yaml
+名称: user-redis
+类型: ClusterIP
+IP: 10.103.91.84
+端口: 6379/TCP (Redis), 9121/TCP (Exporter)
+DNS: user-redis.juwan.svc.cluster.local
+```
+
+**功能特点**
+- 🔀 负载均衡到所有 Redis 节点(主 + 从)
+- 📊 端口 9121 暴露 Prometheus 指标
+- ⚠️ 写操作可能路由到从节点导致失败
+
+**适用场景**
+- ✅ Prometheus 监控抓取(端口 9121)
+- ⚠️ 测试环境的简单访问
+- ❌ 不推荐生产环境读写操作
+
+**监控配置**
+```yaml
+# Prometheus ServiceMonitor
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: redis-metrics
+ namespace: juwan
+spec:
+ selector:
+ matchLabels:
+ app: redis
+ endpoints:
+ - port: redis-exporter
+ interval: 30s
+ path: /metrics
+```
+
+---
+
+### 4. user-redis-additional
+
+**基本信息**
+```yaml
+名称: user-redis-additional
+类型: ClusterIP
+IP: 10.107.228.48
+端口: 6379/TCP
+```
+
+**功能特点**
+- 功能类似 user-redis
+- 提供额外的访问入口
+- 用于多租户或网络隔离场景
+
+**适用场景**
+- 特殊网络策略场景
+- 多应用隔离访问
+- 备用访问点
+
+---
+
+### 5. user-redis-headless
+
+**基本信息**
+```yaml
+名称: user-redis-headless
+类型: ClusterIP (Headless - None)
+端口: 6379/TCP
+DNS:
+ - user-redis-0.user-redis-headless.juwan.svc.cluster.local
+ - user-redis-1.user-redis-headless.juwan.svc.cluster.local
+ - user-redis-2.user-redis-headless.juwan.svc.cluster.local
+```
+
+**功能特点**
+- 🎯 直接返回所有 Pod IP,不做负载均衡
+- 🔗 用于 StatefulSet Pod 间通信
+- 📡 Redis 主从复制使用此服务发现
+
+**适用场景**
+- ✅ 内部复制通信
+- ✅ 集群管理操作
+- ✅ 需要直接访问特定 Pod
+- ❌ 不适合应用层使用
+
+**直接访问示例**
+```bash
+# 直接连接 user-redis-0
+redis-cli -h user-redis-0.user-redis-headless.juwan.svc.cluster.local -a "$REDIS_PASSWORD"
+
+# DNS 解析会返回具体 Pod IP
+nslookup user-redis-headless.juwan.svc.cluster.local
+```
+
+---
+
+## 🟡 Sentinel 监控层 Service 详解
+
+### 1. user-redis-sentinel-sentinel ⭐⭐⭐
+
+**基本信息**
+```yaml
+名称: user-redis-sentinel-sentinel
+类型: ClusterIP
+IP: 10.105.28.231
+端口: 26379/TCP
+DNS: user-redis-sentinel-sentinel.juwan.svc.cluster.local
+```
+
+**功能特点**
+- 🛡️ 提供高可用 Redis 访问
+- 🔄 自动发现主节点
+- ⚡ 主节点故障时自动切换
+- 📍 客户端自动跟踪主节点变化
+
+**Sentinel 架构**
+```
+应用 → Sentinel Service → Sentinel 节点 (3个)
+ ↓
+ 监控 Redis 集群
+ ↓
+ 自动发现当前主节点位置
+```
+
+**适用场景**
+- ✅✅✅ **生产环境强烈推荐**
+- ✅ 需要自动故障转移
+- ✅ 高可用架构
+- ✅ 无需手动处理主从切换
+
+**连接示例**
+```go
+// Go - go-redis Sentinel 模式
+rdb := redis.NewFailoverClient(&redis.FailoverOptions{
+ MasterName: "mymaster",
+ SentinelAddrs: []string{
+ "user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379",
+ },
+ Password: os.Getenv("REDIS_PASSWORD"),
+ DB: 0,
+
+ // 连接池配置
+ PoolSize: 10,
+ MinIdleConns: 5,
+
+ // 超时配置
+ DialTimeout: 5 * time.Second,
+ ReadTimeout: 3 * time.Second,
+ WriteTimeout: 3 * time.Second,
+})
+
+// 使用方式与普通客户端完全一致
+err := rdb.Set(ctx, "key", "value", 0).Err()
+val, err := rdb.Get(ctx, "key").Result()
+```
+
+```python
+# Python - redis-py Sentinel 模式
+from redis.sentinel import Sentinel
+
+sentinel = Sentinel([
+ ('user-redis-sentinel-sentinel.juwan.svc.cluster.local', 26379)
+], socket_timeout=0.5)
+
+# 获取主节点(写)
+master = sentinel.master_for('mymaster',
+ password=os.getenv('REDIS_PASSWORD'),
+ socket_timeout=0.5)
+master.set('key', 'value')
+
+# 获取从节点(读)
+slave = sentinel.slave_for('mymaster',
+ password=os.getenv('REDIS_PASSWORD'),
+ socket_timeout=0.5)
+value = slave.get('key')
+```
+
+```yaml
+# Spring Boot application.yml
+spring:
+ redis:
+ sentinel:
+ master: mymaster
+ nodes:
+ - user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+ password: ${REDIS_PASSWORD}
+```
+
+**Sentinel 命令查询**
+```bash
+# 查看主节点信息
+kubectl exec -it user-redis-sentinel-sentinel-0 -n juwan -- \
+ redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster
+
+# 查看所有监控的主节点
+kubectl exec -it user-redis-sentinel-sentinel-0 -n juwan -- \
+ redis-cli -p 26379 SENTINEL masters
+
+# 查看从节点列表
+kubectl exec -it user-redis-sentinel-sentinel-0 -n juwan -- \
+ redis-cli -p 26379 SENTINEL slaves mymaster
+
+# 查看 Sentinel 节点
+kubectl exec -it user-redis-sentinel-sentinel-0 -n juwan -- \
+ redis-cli -p 26379 SENTINEL sentinels mymaster
+```
+
+---
+
+### 2. user-redis-sentinel-sentinel-additional
+
+**基本信息**
+```yaml
+名称: user-redis-sentinel-sentinel-additional
+类型: ClusterIP
+IP: 10.97.111.42
+端口: 26379/TCP
+```
+
+**功能特点**
+- 功能同 user-redis-sentinel-sentinel
+- 提供额外访问入口
+- 用于多客户端分离
+
+**适用场景**
+- 多应用共享 Redis 时的访问隔离
+- 网络策略要求
+- 流量分离
+
+---
+
+### 3. user-redis-sentinel-sentinel-headless
+
+**基本信息**
+```yaml
+名称: user-redis-sentinel-sentinel-headless
+类型: ClusterIP (Headless - None)
+端口: 26379/TCP
+```
+
+**功能特点**
+- Sentinel 节点间通信
+- 选举和投票
+- 状态同步
+
+**适用场景**
+- 内部使用,应用层无需关注
+
+---
+
+## 🔧 管理工具连接方式
+
+### 使用 kubectl port-forward(推荐)
+
+#### 方式一:连接主节点
+```bash
+# 转发主节点服务到本地
+kubectl port-forward -n juwan svc/user-redis-master 6379:6379
+
+# 或直接转发到 Pod
+kubectl port-forward -n juwan pod/user-redis-0 6379:6379
+```
+
+然后在管理工具中配置:
+- **Host**: localhost
+- **Port**: 6379
+- **Password**: (见下方获取方法)
+
+#### 方式二:使用 LoadBalancer(生产不推荐)
+```yaml
+# 临时暴露服务(仅用于调试)
+apiVersion: v1
+kind: Service
+metadata:
+ name: redis-external
+ namespace: juwan
+spec:
+ type: LoadBalancer
+ selector:
+ app: user-redis
+ ports:
+ - port: 6379
+ targetPort: 6379
+```
+
+---
+
+### 获取 Redis 密码
+
+```bash
+# 方式一:直接输出
+kubectl get secret user-redis -n juwan -o jsonpath='{.data.password}' | base64 -d
+
+# 方式二:设置为环境变量
+export REDIS_PASSWORD=$(kubectl get secret user-redis -n juwan -o jsonpath='{.data.password}' | base64 -d)
+echo $REDIS_PASSWORD
+
+# 方式三:保存到文件
+kubectl get secret user-redis -n juwan -o jsonpath='{.data.password}' | base64 -d > redis-password.txt
+```
+
+---
+
+### 常用管理工具配置
+
+#### Redis Desktop Manager (RedisInsight)
+```
+Connection Name: juwan-user-redis
+Host: localhost (使用 port-forward)
+Port: 6379
+Username: (留空)
+Password: (从 Secret 获取)
+```
+
+#### redis-cli
+```bash
+# 在集群内访问
+kubectl exec -it user-redis-0 -n juwan -- redis-cli -a "$REDIS_PASSWORD"
+
+# 从本地访问(需要 port-forward)
+redis-cli -h localhost -p 6379 -a "$REDIS_PASSWORD"
+
+# 常用命令
+INFO replication # 查看复制状态
+INFO stats # 查看统计信息
+CLUSTER INFO # 查看集群信息
+KEYS * # 查看所有 key(生产谨慎使用)
+```
+
+---
+
+## 💻 应用服务连接方式
+
+### 方案一:Sentinel 模式(生产强烈推荐)⭐⭐⭐
+
+**优点**
+- ✅ 自动故障转移
+- ✅ 高可用
+- ✅ 无需手动切换
+- ✅ 客户端自动重连
+- ✅ 支持读写分离
+
+#### Go-Zero 配置
+
+**配置文件** `app/users/rpc/etc/pb.yaml`
+```yaml
+Name: pb.rpc
+ListenOn: 0.0.0.0:9001
+
+# Redis Sentinel 配置
+Redis:
+ Type: sentinel
+ MasterName: mymaster
+ SentinelAddrs:
+ - user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+ Pass: ${REDIS_PASSWORD} # 从环境变量读取
+
+Etcd:
+ Hosts:
+ - etcd-service.juwan.svc.cluster.local:2379
+ Key: pb.rpc
+```
+
+**Config 结构** `app/users/rpc/internal/config/config.go`
+```go
+package config
+
+import (
+ "github.com/zeromicro/go-zero/core/stores/redis"
+ "github.com/zeromicro/go-zero/zrpc"
+)
+
+type Config struct {
+ zrpc.RpcServerConf
+ Redis redis.RedisConf
+}
+```
+
+**初始化 Redis** `app/users/rpc/internal/svc/serviceContext.go`
+```go
+package svc
+
+import (
+ "github.com/zeromicro/go-zero/core/stores/redis"
+ "juwan-backend/app/users/rpc/internal/config"
+)
+
+type ServiceContext struct {
+ Config config.Config
+ Redis *redis.Redis
+}
+
+func NewServiceContext(c config.Config) *ServiceContext {
+ return &ServiceContext{
+ Config: c,
+ Redis: redis.MustNewRedis(c.Redis),
+ }
+}
+```
+
+**使用示例** `app/users/rpc/internal/logic/getUsersByIdLogic.go`
+```go
+func (l *GetUsersByIdLogic) GetUsersById(in *pb.GetUsersByIdReq) (*pb.GetUsersByIdResp, error) {
+ // 尝试从缓存获取
+ cacheKey := fmt.Sprintf("user:%d", in.Id)
+ cached, err := l.svcCtx.Redis.Get(cacheKey)
+ if err == nil && cached != "" {
+ // 缓存命中
+ var user pb.User
+ json.Unmarshal([]byte(cached), &user)
+ return &pb.GetUsersByIdResp{User: &user}, nil
+ }
+
+ // 从数据库查询
+ user := l.fetchUserFromDB(in.Id)
+
+ // 写入缓存
+ userJSON, _ := json.Marshal(user)
+ l.svcCtx.Redis.Setex(cacheKey, string(userJSON), 3600) // 1小时过期
+
+ return &pb.GetUsersByIdResp{User: user}, nil
+}
+```
+
+#### Go (原生 go-redis)
+
+```go
+package main
+
+import (
+	"context"
+	"os"
+	"time"
+
+	"github.com/redis/go-redis/v9"
+)
+
+func NewRedisClient() *redis.Client {
+ rdb := redis.NewFailoverClient(&redis.FailoverOptions{
+ MasterName: "mymaster",
+ SentinelAddrs: []string{
+ "user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379",
+ },
+ Password: os.Getenv("REDIS_PASSWORD"),
+ DB: 0,
+
+ // 连接池配置
+ PoolSize: 10,
+ MinIdleConns: 5,
+
+ // 超时配置
+ DialTimeout: 5 * time.Second,
+ ReadTimeout: 3 * time.Second,
+ WriteTimeout: 3 * time.Second,
+
+ // 重试配置
+ MaxRetries: 3,
+ MinRetryBackoff: 8 * time.Millisecond,
+ MaxRetryBackoff: 512 * time.Millisecond,
+ })
+
+ // 测试连接
+ ctx := context.Background()
+ if err := rdb.Ping(ctx).Err(); err != nil {
+ panic(err)
+ }
+
+ return rdb
+}
+```
+
+#### Python (redis-py)
+
+```python
+from redis.sentinel import Sentinel
+import os
+
+# 初始化 Sentinel
+sentinel = Sentinel([
+ ('user-redis-sentinel-sentinel.juwan.svc.cluster.local', 26379)
+], socket_timeout=5.0)
+
+# 获取主节点连接(用于写操作)
+master = sentinel.master_for(
+ 'mymaster',
+ password=os.getenv('REDIS_PASSWORD'),
+ socket_timeout=3.0,
+ socket_connect_timeout=5.0,
+ socket_keepalive=True,
+ socket_keepalive_options={},
+ connection_pool_kwargs={
+ 'max_connections': 50
+ }
+)
+
+# 获取从节点连接(用于读操作)
+slave = sentinel.slave_for(
+ 'mymaster',
+ password=os.getenv('REDIS_PASSWORD'),
+ socket_timeout=3.0
+)
+
+# 使用
+master.set('key', 'value')
+value = slave.get('key')
+```
+
+#### Java (Spring Data Redis)
+
+```yaml
+# application.yml
+spring:
+ redis:
+ timeout: 3000ms
+ password: ${REDIS_PASSWORD}
+ sentinel:
+ master: mymaster
+ nodes:
+ - user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+ lettuce:
+ pool:
+ max-active: 10
+ max-idle: 5
+ min-idle: 2
+ max-wait: 3000ms
+```
+
+```java
+@Configuration
+public class RedisConfig {
+
+ @Bean
+    public RedisTemplate<String, Object> redisTemplate(
+            RedisConnectionFactory connectionFactory) {
+        RedisTemplate<String, Object> template = new RedisTemplate<>();
+ template.setConnectionFactory(connectionFactory);
+ template.setKeySerializer(new StringRedisSerializer());
+ template.setValueSerializer(new GenericJackson2JsonRedisSerializer());
+ return template;
+ }
+}
+```
+
+---
+
+### 方案二:主从分离模式 ⭐
+
+**优点**
+- ✅ 读写性能优化
+- ✅ 配置清晰
+- ⚠️ 需要手动处理故障
+
+**Go-Zero 配置**
+```yaml
+# app/users/rpc/etc/pb.yaml
+Redis:
+ - Host: user-redis-master.juwan.svc.cluster.local:6379
+ Type: node
+ Pass: ${REDIS_PASSWORD}
+ - Host: user-redis-replica.juwan.svc.cluster.local:6379
+ Type: node
+ Pass: ${REDIS_PASSWORD}
+```
+
+**代码示例**
+```go
+type ServiceContext struct {
+ Config config.Config
+ RedisMaster *redis.Redis // 写操作
+ RedisReplica *redis.Redis // 读操作
+}
+
+func NewServiceContext(c config.Config) *ServiceContext {
+ return &ServiceContext{
+ Config: c,
+ RedisMaster: redis.MustNewRedis(c.Redis[0]), // master
+ RedisReplica: redis.MustNewRedis(c.Redis[1]), // replica
+ }
+}
+
+// 写操作
+l.svcCtx.RedisMaster.Set("key", "value")
+
+// 读操作
+val, _ := l.svcCtx.RedisReplica.Get("key")
+```
+
+---
+
+### 方案三:简单模式(仅测试环境)
+
+```yaml
+Redis:
+ Host: user-redis-master.juwan.svc.cluster.local:6379
+ Type: node
+ Pass: ${REDIS_PASSWORD}
+```
+
+---
+
+### Kubernetes Deployment 配置
+
+```yaml
+# deploy/k8s/service/user/user-rpc.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: user-rpc
+ namespace: juwan
+spec:
+ template:
+ spec:
+ containers:
+ - name: user-rpc
+ image: user-rpc:v1
+ env:
+ # 数据库连接
+ - name: DB_URI
+ valueFrom:
+ secretKeyRef:
+ name: user-db-app
+ key: uri
+
+ # Redis 密码
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: user-redis
+ key: password
+
+ # 健康检查
+ readinessProbe:
+ tcpSocket:
+ port: 9001
+ initialDelaySeconds: 10
+ periodSeconds: 10
+
+ livenessProbe:
+ tcpSocket:
+ port: 9001
+ initialDelaySeconds: 15
+ periodSeconds: 20
+```
+
+---
+
+## 📊 连接方式对比
+
+| 连接方式 | 优点 | 缺点 | 故障转移 | 读写分离 | 复杂度 | 推荐度 |
+|---------|------|------|---------|---------|--------|--------|
+| **Sentinel 模式** | 自动高可用,客户端自动切换 | 配置稍复杂 | ✅ 自动 | ✅ 支持 | 中 | ⭐⭐⭐⭐⭐ |
+| **主从分离** | 性能优化,逻辑清晰 | 需手动处理故障 | ❌ 手动 | ✅ 支持 | 中 | ⭐⭐⭐ |
+| **仅连 Master** | 配置简单,强一致性 | 单点故障,读性能差 | ❌ 手动 | ❌ 不支持 | 低 | ⭐⭐ |
+| **仅连 Replica** | 读性能好 | 只读,不能写入 | ❌ 手动 | ✅ 仅读 | 低 | ⭐ |
+| **连 user-redis** | 极简单 | 性能差,不可靠 | ❌ 无 | ❌ 不支持 | 低 | ❌ |
+
+---
+
+## 🎯 最佳实践建议
+
+### 生产环境(强烈推荐)
+
+```yaml
+# 使用 Sentinel 模式
+Redis:
+ Type: sentinel
+ MasterName: mymaster
+ SentinelAddrs:
+ - user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+ Pass: ${REDIS_PASSWORD}
+
+ # 连接池配置
+ PoolSize: 10
+ MinIdleConns: 5
+
+ # 超时配置
+ DialTimeout: 5s
+ ReadTimeout: 3s
+ WriteTimeout: 3s
+
+ # 重试配置
+ MaxRetries: 3
+```
+
+**理由**
+- ✅ 自动故障转移,RTO < 30秒
+- ✅ 客户端无感知切换
+- ✅ 无需人工介入
+- ✅ 久经考验的成熟方案
+
+---
+
+### 开发/测试环境
+
+```yaml
+# 简化配置,直连主节点
+Redis:
+ Host: user-redis-master.juwan.svc.cluster.local:6379
+ Type: node
+ Pass: ${REDIS_PASSWORD}
+```
+
+或使用 port-forward:
+```bash
+kubectl port-forward -n juwan svc/user-redis-master 6379:6379
+```
+
+---
+
+### 性能优化建议
+
+#### 1. 连接池配置
+```go
+PoolSize: runtime.NumCPU() * 2, // CPU 数量的 2 倍
+MinIdleConns: runtime.NumCPU(), // CPU 数量
+MaxConnAge: 30 * time.Minute, // 连接最大存活时间
+```
+
+#### 2. 超时配置
+```go
+DialTimeout: 5 * time.Second, // 连接超时
+ReadTimeout: 3 * time.Second, // 读超时
+WriteTimeout: 3 * time.Second, // 写超时
+PoolTimeout: 4 * time.Second, // 获取连接超时
+```
+
+#### 3. 命令优化
+```go
+// ❌ 避免:循环中多次调用
+for i := 0; i < 1000; i++ {
+ rdb.Set(ctx, fmt.Sprintf("key:%d", i), i)
+}
+
+// ✅ 推荐:使用 Pipeline
+pipe := rdb.Pipeline()
+for i := 0; i < 1000; i++ {
+ pipe.Set(ctx, fmt.Sprintf("key:%d", i), i, 0)
+}
+pipe.Exec(ctx)
+```
+
+#### 4. 缓存策略
+```go
+// Cache-Aside Pattern
+func GetUser(id int64) (*User, error) {
+ // 1. 先查缓存
+ cacheKey := fmt.Sprintf("user:%d", id)
+ cached, err := rdb.Get(ctx, cacheKey).Result()
+ if err == nil {
+ var user User
+ json.Unmarshal([]byte(cached), &user)
+ return &user, nil
+ }
+
+ // 2. 缓存未命中,查数据库
+ user := queryFromDB(id)
+
+ // 3. 写入缓存
+ userJSON, _ := json.Marshal(user)
+ rdb.Set(ctx, cacheKey, userJSON, 1*time.Hour)
+
+ return user, nil
+}
+```
+
+---
+
+### 监控配置
+
+#### Prometheus 指标采集
+```yaml
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ name: redis-exporter
+ namespace: juwan
+spec:
+ selector:
+ matchLabels:
+ app: user-redis
+ endpoints:
+ - port: redis-exporter # 端口 9121
+ interval: 30s
+ path: /metrics
+```
+
+#### 关键指标监控
+```yaml
+# 告警规则示例
+groups:
+ - name: redis
+ rules:
+ # Redis 实例宕机
+ - alert: RedisDown
+ expr: redis_up == 0
+ for: 1m
+ annotations:
+ summary: "Redis instance down"
+
+ # 内存使用率过高
+ - alert: RedisMemoryHigh
+ expr: redis_memory_used_bytes / redis_memory_max_bytes > 0.9
+ for: 5m
+ annotations:
+ summary: "Redis memory usage > 90%"
+
+ # 连接数过高
+ - alert: RedisConnectionsHigh
+ expr: redis_connected_clients > 1000
+ for: 5m
+ annotations:
+ summary: "Redis connections > 1000"
+```
+
+---
+
+### 安全建议
+
+#### 1. 密码管理
+```bash
+# 定期轮换密码
+kubectl create secret generic user-redis \
+ --from-literal=password=$(openssl rand -base64 32) \
+ --dry-run=client -o yaml | kubectl apply -f -
+
+# 重启 Redis Pods 使新密码生效
+kubectl rollout restart statefulset/user-redis -n juwan
+```
+
+#### 2. 网络策略
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: redis-access
+ namespace: juwan
+spec:
+ podSelector:
+ matchLabels:
+ app: user-redis
+ policyTypes:
+ - Ingress
+ ingress:
+ # 只允许同命名空间的 user-rpc 访问
+ - from:
+ - podSelector:
+ matchLabels:
+ app: user-rpc
+ ports:
+ - protocol: TCP
+ port: 6379
+```
+
+#### 3. TLS 加密(可选)
+```yaml
+# Redis TLS 配置
+apiVersion: redis.redis.opstreelabs.in/v1beta2
+kind: RedisReplication
+metadata:
+ name: user-redis
+spec:
+ TLS:
+ enabled: true
+ secret:
+ secretName: redis-tls-cert
+```
+
+---
+
+## 🔍 故障排查
+
+### 1. 连接失败
+
+**症状**
+```
+Error: dial tcp 10.97.120.76:6379: i/o timeout
+```
+
+**排查步骤**
+```bash
+# 1. 检查 Service 是否存在
+kubectl get svc user-redis-master -n juwan
+
+# 2. 检查 Endpoints
+kubectl get endpoints user-redis-master -n juwan
+
+# 3. 检查 Pod 状态
+kubectl get pods -l app=user-redis -n juwan
+
+# 4. 测试网络连通性
+kubectl run -it --rm netshoot --image=nicolaka/netshoot --restart=Never -n juwan -- \
+ nc -zv user-redis-master 6379
+
+# 5. 查看 Pod 日志
+kubectl logs user-redis-0 -n juwan -c redis
+```
+
+---
+
+### 2. 认证失败
+
+**症状**
+```
+Error: NOAUTH Authentication required
+```
+
+**解决方法**
+```bash
+# 1. 确认 Secret 存在
+kubectl get secret user-redis -n juwan
+
+# 2. 验证密码
+PASSWORD=$(kubectl get secret user-redis -n juwan -o jsonpath='{.data.password}' | base64 -d)
+echo $PASSWORD
+
+# 3. 测试连接
+kubectl exec -it user-redis-0 -n juwan -- redis-cli -a $PASSWORD PING
+```
+
+---
+
+### 3. 主从复制异常
+
+**症状**
+```
+Warning: Redis replica lag is high
+```
+
+**排查**
+```bash
+# 在主节点执行
+kubectl exec -it user-redis-0 -n juwan -- redis-cli -a <password> INFO replication
+
+# 查看输出
+# role:master
+# connected_slaves:2
+# slave0:ip=10.244.1.10,port=6379,state=online,offset=1234,lag=0
+# slave1:ip=10.244.2.15,port=6379,state=online,offset=1234,lag=0
+```
+
+**如果 lag 过大**
+```bash
+# 检查网络延迟
+kubectl exec -it user-redis-0 -n juwan -- ping user-redis-1
+
+# 检查 Redis 性能
+kubectl exec -it user-redis-0 -n juwan -- redis-cli -a <password> INFO stats
+```
+
+---
+
+### 4. Sentinel 无法发现主节点
+
+**症状**
+```
+Error: sentinel: no master found
+```
+
+**排查**
+```bash
+# 1. 检查 Sentinel 状态
+kubectl exec -it user-redis-sentinel-sentinel-0 -n juwan -- \
+ redis-cli -p 26379 SENTINEL masters
+
+# 2. 检查主节点地址
+kubectl exec -it user-redis-sentinel-sentinel-0 -n juwan -- \
+ redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster
+
+# 3. 查看 Sentinel 日志
+kubectl logs user-redis-sentinel-sentinel-0 -n juwan
+
+# 4. 手动触发故障转移(慎用)
+kubectl exec -it user-redis-sentinel-sentinel-0 -n juwan -- \
+ redis-cli -p 26379 SENTINEL failover mymaster
+```
+
+---
+
+### 5. 性能问题
+
+**慢查询分析**
+```bash
+# 查看慢查询
+kubectl exec -it user-redis-0 -n juwan -- \
+  redis-cli -a <password> SLOWLOG GET 10
+
+# 设置慢查询阈值(10ms)
+kubectl exec -it user-redis-0 -n juwan -- \
+  redis-cli -a <password> CONFIG SET slowlog-log-slower-than 10000
+```
+
+**命令统计**
+```bash
+# 查看命令统计
+kubectl exec -it user-redis-0 -n juwan -- \
+  redis-cli -a <password> INFO commandstats
+```
+
+**内存分析**
+```bash
+# 查看内存使用
+kubectl exec -it user-redis-0 -n juwan -- \
+  redis-cli -a <password> INFO memory
+
+# 查看大 key
+kubectl exec -it user-redis-0 -n juwan -- \
+  redis-cli -a <password> --bigkeys
+```
+
+---
+
+## 📚 参考资源
+
+### 官方文档
+- [Redis Sentinel Documentation](https://redis.io/docs/management/sentinel/)
+- [go-redis Documentation](https://redis.uptrace.dev/)
+- [OpsTree Redis Operator](https://ot-redis-operator.netlify.app/)
+
+### 客户端库
+- Go: [github.com/redis/go-redis/v9](https://github.com/redis/go-redis)
+- Python: [redis-py](https://github.com/redis/redis-py)
+- Java: [Spring Data Redis](https://spring.io/projects/spring-data-redis)
+- Node.js: [ioredis](https://github.com/luin/ioredis)
+
+### 监控工具
+- [RedisInsight](https://redis.com/redis-enterprise/redis-insight/)
+- [Redis Exporter](https://github.com/oliver006/redis_exporter)
+- Grafana Dashboard: [Redis Dashboard 11835](https://grafana.com/grafana/dashboards/11835)
+
+---
+
+## 📝 更新日志
+
+| 日期 | 版本 | 变更内容 |
+|------|------|---------|
+| 2026-02-22 | 1.0 | 初始版本,包含完整的 Service 介绍和连接指南 |
+
+---
+
+**文档维护者**: DevOps Team
+**最后更新**: 2026年2月22日
+**下一次审查**: 2026年3月22日
diff --git a/docs/redis-username-discovery.md b/docs/redis-username-discovery.md
new file mode 100644
index 0000000..902a502
--- /dev/null
+++ b/docs/redis-username-discovery.md
@@ -0,0 +1,1068 @@
+# Redis 用户名查找和认证配置指南
+
+**问题日期:** 2026年2月22日
+**环境:** juwan namespace
+**Redis 版本:** 7.0.12
+**部署模式:** RedisReplication (OpsTree Operator)
+
+---
+
+## 📋 目录
+
+1. [问题背景](#问题背景)
+2. [Redis 认证机制演进](#redis-认证机制演进)
+3. [诊断过程](#诊断过程)
+4. [用户名发现结果](#用户名发现结果)
+5. [各语言连接配置](#各语言连接配置)
+6. [常见问题解答](#常见问题解答)
+7. [安全建议](#安全建议)
+
+---
+
+## 🎯 问题背景
+
+### 问题描述
+在配置应用连接 Redis 时,发现只有密码信息(存储在 Kubernetes Secret `user-redis` 中),但不清楚:
+- Redis 是否需要用户名?
+- 如果需要,用户名是什么?
+- 如何查找 Redis 的用户名配置?
+
+### 初始已知信息
+```yaml
+Secret: user-redis
+Namespace: juwan
+Key: password
+Value: (base64 编码的密码)
+```
+
+### 疑问
+- 使用 RedisInsight 等管理工具时,Username 字段应该填什么?
+- 应用代码中是否需要配置 Username?
+- 不同 Redis 版本的认证方式是否有差异?
+
+---
+
+## 📚 Redis 认证机制演进
+
+### Redis 6.0 之前:传统密码认证
+
+**特点:**
+- ❌ 不支持多用户
+- ✅ 只需配置一个全局密码
+- 🔧 配置项:`requirepass`
+- 👤 隐式用户名:`default`(客户端不需要指定)
+
+**配置示例:**
+```conf
+# redis.conf
+requirepass mypassword
+```
+
+**连接方式:**
+```bash
+# 只需要密码
+redis-cli -h host -p 6379 -a mypassword
+
+# 或使用 AUTH 命令
+redis-cli -h host -p 6379
+> AUTH mypassword
+```
+
+**优点:**
+- ✅ 配置简单
+- ✅ 向后兼容
+
+**缺点:**
+- ❌ 所有客户端共享同一密码
+- ❌ 无法区分不同应用的访问权限
+- ❌ 无法限制特定命令
+- ❌ 审计困难
+
+---
+
+### Redis 6.0+:ACL(访问控制列表)
+
+**引入时间:** Redis 6.0 (2020年5月)
+
+**新特性:**
+- ✅ 支持多用户
+- ✅ 每个用户有独立的密码
+- ✅ 细粒度权限控制(命令、key、channel)
+- ✅ 默认用户 `default` 兼容旧版本
+- ✅ 支持运行时动态修改
+
+**架构对比:**
+```
+传统模式:
+┌─────────────┐
+│ 所有客户端 │
+└──────┬──────┘
+ │ (一个密码)
+ ↓
+┌─────────────┐
+│ Redis │
+│ (全权限) │
+└─────────────┘
+
+ACL 模式:
+┌────────┐ ┌────────┐ ┌────────┐
+│ App A │ │ App B │ │ Admin │
+└───┬────┘ └───┬────┘ └───┬────┘
+ │ user1 │ user2 │ admin
+ │ (读写) │ (只读) │ (全部)
+ ↓ ↓ ↓
+┌───────────────────────────────┐
+│ Redis ACL │
+│ user1: +@write +@read │
+│ user2: +@read ~cache:* │
+│ admin: +@all ~* │
+└───────────────────────────────┘
+```
+
+**ACL 规则语法:**
+```
+user <username> on ><password> ~<key-pattern> +<command-rules>
+
+示例:
+user alice on >secret123 ~* +@all # 全部权限
+user bob on >pass456 ~cache:* +get +set # 只能操作 cache:* 的 key
+user readonly on >readonly ~* +@read -@write # 只读权限
+```
+
+---
+
+## 🔍 诊断过程
+
+### 步骤 1:确认 Pod 容器名称
+
+**目的:** 找到 Redis 容器的正确名称,以便执行命令
+
+**命令:**
+```bash
+kubectl get pod user-redis-0 -n juwan -o jsonpath='{.spec.containers[*].name}'
+```
+
+**输出:**
+```
+user-redis redis-exporter
+```
+
+**分析:**
+- ✅ Pod 中有 2 个容器
+- 📦 `user-redis`:Redis 主容器
+- 📊 `redis-exporter`:Prometheus 监控容器
+- 🎯 **需要连接到 `user-redis` 容器**
+
+**关键信息:**
+```yaml
+Pod: user-redis-0
+Containers:
+ - name: user-redis # ← Redis 服务容器
+ image: quay.io/opstree/redis:v7.0.12
+ port: 6379
+ - name: redis-exporter # ← 监控容器
+ image: quay.io/opstree/redis-exporter
+ port: 9121
+```
+
+---
+
+### 步骤 2:获取 Redis 密码
+
+**目的:** 从 Kubernetes Secret 中提取密码,用于认证
+
+**命令:**
+```bash
+kubectl get secret user-redis -n juwan -o jsonpath='{.data.password}' | base64 -d
+```
+
+**输出:**
+```
+<密码内容> # 实际密码已隐藏
+```
+
+**说明:**
+- Secret 中的数据以 base64 编码存储
+- 需要解码才能得到明文密码
+- `base64 -d` 在 Linux/Mac 上;Windows PowerShell 需要用其他方法
+
+**Windows PowerShell 解码方法:**
+```powershell
+$encoded = kubectl get secret user-redis -n juwan -o jsonpath='{.data.password}'
+[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($encoded))
+```
+
+---
+
+### 步骤 3:检查 ACL 配置(关键步骤)
+
+**目的:** 查询 Redis 的用户配置,确认是否启用 ACL 以及有哪些用户
+
+**命令:**
+```bash
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+ redis-cli -a $(kubectl get secret user-redis -n juwan -o jsonpath='{.data.password}' | base64 -d) \
+ ACL LIST
+```
+
+**命令解析:**
+```bash
+kubectl exec -it user-redis-0 \ # 在 user-redis-0 Pod 中执行命令
+ -n juwan \ # 命名空间
+ -c user-redis \ # 指定容器
+ -- \ # 分隔符
+ redis-cli \ # Redis 命令行工具
+ -a $(kubectl get secret ...) \ # 使用密码认证
+ ACL LIST # 列出所有 ACL 用户
+```
+
+**输出:**
+```
+Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
+1) "user default on #e3e7d4b9413497efc274c747b2ee88023e00e6416080db92c7bdd49a73f32d3d ~* &* +@all"
+```
+
+**输出分析:**
+
+#### 警告信息
+```
+Warning: Using a password with '-a' option on the command line interface may not be safe.
+```
+- ⚠️ 安全提醒:命令行中的密码可能被历史记录或进程列表泄露
+- 💡 生产环境建议:使用配置文件或环境变量传递密码
+
+#### ACL 规则详解
+```
+user default on #e3e7d4b...73f32d3d ~* &* +@all
+| | | | | | |
+| | | | | | └─ 权限:所有命令
+| | | | | └──── 所有 Pub/Sub channel
+| | | | └─────── 所有 key pattern
+| | | └──────────────────────────── 密码哈希(SHA256)
+| | └─────────────────────────────── 状态:已启用
+| └─────────────────────────────────────── 用户名
+└──────────────────────────────────────────── 类型:用户
+```
+
+**权限标识含义:**
+
+| 标识 | 含义 | 说明 |
+|-----|------|------|
+| `on` | 用户已启用 | 可以登录 |
+| `off` | 用户已禁用 | 无法登录 |
+| `~*` | Key Pattern | `*` = 所有 key;`~cache:*` = 只能访问 cache 开头的 key |
+| `&*` | Pub/Sub Pattern | `*` = 所有 channel |
+| `+@all` | 命令权限 | `@all` = 所有命令;`+get +set` = 只能用 GET/SET |
+| `-@dangerous` | 禁止命令组 | 禁止危险命令(FLUSHDB, KEYS 等)|
+| `#hash` | 密码哈希 | SHA256 哈希值,不存储明文 |
+
+**命令组示例:**
+- `@read`:只读命令(GET, HGET, LRANGE 等)
+- `@write`:写入命令(SET, HSET, LPUSH 等)
+- `@admin`:管理命令(CONFIG, SHUTDOWN 等)
+- `@dangerous`:危险命令(FLUSHALL, KEYS 等)
+- `@all`:所有命令
+
+**结论:**
+- ✅ Redis 启用了 ACL 模式
+- 👤 **用户名是:`default`**
+- 🔑 密码已配置(存储为 SHA256 哈希)
+- 🔓 权限:完全访问(所有命令、所有 key、所有 channel)
+
+---
+
+### 步骤 4:验证用户名配置
+
+**目的:** 确认 `default` 用户可以正常工作
+
+**方法 1:使用密码(不指定用户名)**
+```bash
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+  redis-cli -a <password> PING
+```
+
+**预期输出:**
+```
+Warning: Using a password with '-a' option on the command line interface may not be safe.
+PONG
+```
+
+✅ **说明:只提供密码可以正常连接(默认使用 default 用户)**
+
+---
+
+**方法 2:显式指定用户名**
+```bash
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+  redis-cli --user default --pass <password> PING
+```
+
+**预期输出:**
+```
+PONG
+```
+
+✅ **说明:显式指定 default 用户也可以正常连接**
+
+---
+
+**方法 3:测试错误的用户名**
+```bash
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+  redis-cli --user wronguser --pass <password> PING
+```
+
+**预期输出:**
+```
+(error) WRONGPASS invalid username-password pair or user is disabled.
+```
+
+❌ **说明:不存在的用户名会认证失败**
+
+---
+
+### 步骤 5:查看完整的 ACL 信息
+
+**目的:** 了解 `default` 用户的详细权限配置
+
+**命令:**
+```bash
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+  redis-cli -a <password> ACL GETUSER default
+```
+
+**输出:**
+```
+ 1) "flags"
+ 2) 1) "on"
+ 2) "allkeys"
+ 3) "allchannels"
+ 4) "allcommands"
+ 3) "passwords"
+ 4) 1) "e3e7d4b9413497efc274c747b2ee88023e00e6416080db92c7bdd49a73f32d3d"
+ 5) "commands"
+ 6) "+@all"
+ 7) "keys"
+ 8) 1) "*"
+ 9) "channels"
+10) 1) "*"
+11) "selectors"
+12) (empty array)
+```
+
+**详细分析:**
+
+| 字段 | 值 | 含义 |
+|-----|---|------|
+| `flags` | `on, allkeys, allchannels, allcommands` | 用户已启用,可访问所有资源 |
+| `passwords` | `e3e7d4b...` (SHA256) | 密码哈希 |
+| `commands` | `+@all` | 允许所有命令组 |
+| `keys` | `*` | 可访问所有 key |
+| `channels` | `*` | 可访问所有 Pub/Sub channel |
+
+**权限总结:**
+- ✅ 用户状态:启用(on)
+- ✅ 命令权限:所有命令(+@all)
+- ✅ Key 权限:所有 key(~*)
+- ✅ Channel 权限:所有 channel(&*)
+- 🔒 密码:已设置且加密存储
+
+---
+
+## 🎯 用户名发现结果
+
+### 最终结论
+
+#### ✅ 当前 Redis 配置
+```yaml
+认证模式: ACL (Redis 6.0+)
+用户名: default
+密码: (存储在 Secret user-redis 中)
+权限: 完全访问(超级用户)
+状态: 已启用
+```
+
+#### 📝 连接参数汇总
+
+| 参数 | 值 | 备注 |
+|-----|---|------|
+| **用户名** | `default` | 可省略,客户端默认使用 |
+| **密码** | `kubectl get secret user-redis -n juwan -o jsonpath='{.data.password}' \| base64 -d` | 必需 |
+| **Host (集群内)** | `user-redis-master.juwan.svc.cluster.local` | 写操作 |
+| **Host (集群内)** | `user-redis-replica.juwan.svc.cluster.local` | 读操作 |
+| **Host (Sentinel)** | `user-redis-sentinel-sentinel.juwan.svc.cluster.local` | 推荐 |
+| **Port** | `6379` | Redis 数据端口 |
+| **Sentinel Port** | `26379` | Sentinel 端口 |
+
+---
+
+### 为什么大多数情况不需要指定用户名?
+
+#### Redis 客户端的默认行为
+
+1. **向后兼容**
+ ```
+ Redis 6.0+ 保持了与旧版本的兼容性
+ 当客户端只提供密码时,自动使用 'default' 用户
+ ```
+
+2. **客户端实现**
+ ```go
+ // go-redis 内部逻辑(伪代码)
+ if password != "" && username == "" {
+ username = "default" // 自动补全
+ }
+ ```
+
+3. **AUTH 命令演进**
+ ```bash
+ # Redis 5.x 及之前
+ AUTH password # 只有密码
+
+ # Redis 6.0+(向后兼容)
+ AUTH password # 等价于 AUTH default password
+ AUTH username password # 新格式
+ ```
+
+#### 何时需要显式指定用户名?
+
+| 场景 | 是否需要 | 原因 |
+|-----|---------|------|
+| 使用 `default` 用户 | ❌ 不需要 | 客户端自动使用 |
+| 创建了自定义用户 | ✅ 需要 | 必须明确指定 |
+| 多应用共享 Redis | ✅ 推荐 | 权限隔离 |
+| 审计需求 | ✅ 推荐 | 区分访问来源 |
+| 管理工具连接 | ⚠️ 可选 | 有些工具要求填写 |
+
+---
+
+## 💻 各语言连接配置
+
+### Go (go-redis)
+
+#### 方式 1:只用密码(推荐)
+```go
+import "github.com/redis/go-redis/v9"
+
+// 不指定 Username,自动使用 default
+rdb := redis.NewClient(&redis.Options{
+ Addr: "user-redis-master.juwan.svc.cluster.local:6379",
+ Password: os.Getenv("REDIS_PASSWORD"), // 只需密码
+ DB: 0,
+})
+```
+
+#### 方式 2:显式指定用户名
+```go
+// 显式指定 Username(效果相同)
+rdb := redis.NewClient(&redis.Options{
+ Addr: "user-redis-master.juwan.svc.cluster.local:6379",
+ Username: "default", // 明确指定
+ Password: os.Getenv("REDIS_PASSWORD"),
+ DB: 0,
+})
+```
+
+#### Sentinel 模式(生产推荐)
+```go
+// Sentinel 模式也支持用户名
+rdb := redis.NewFailoverClient(&redis.FailoverOptions{
+ MasterName: "mymaster",
+ SentinelAddrs: []string{
+ "user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379",
+ },
+ // Username 可省略(默认 default)
+ Password: os.Getenv("REDIS_PASSWORD"),
+ DB: 0,
+})
+```
+
+---
+
+### Go-Zero 框架
+
+#### 配置文件
+```yaml
+# app/users/rpc/etc/pb.yaml
+Redis:
+ Type: sentinel
+ MasterName: mymaster
+ SentinelAddrs:
+ - user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+ Pass: ${REDIS_PASSWORD} # 只需密码,不需要用户名
+```
+
+#### 如果需要自定义用户
+```yaml
+Redis:
+ Type: sentinel
+ MasterName: mymaster
+ SentinelAddrs:
+ - user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+ Username: default # 可选:显式指定
+ Pass: ${REDIS_PASSWORD}
+```
+
+---
+
+### Python (redis-py)
+
+#### 只用密码
+```python
+import redis
+
+# 不指定 username
+r = redis.Redis(
+ host='user-redis-master.juwan.svc.cluster.local',
+ port=6379,
+ password=os.getenv('REDIS_PASSWORD'), # 只需密码
+ db=0
+)
+```
+
+#### 显式指定用户名(redis-py 4.3+)
+```python
+# 显式指定 username
+r = redis.Redis(
+ host='user-redis-master.juwan.svc.cluster.local',
+ port=6379,
+ username='default', # 明确指定
+ password=os.getenv('REDIS_PASSWORD'),
+ db=0
+)
+```
+
+#### Sentinel 模式
+```python
+from redis.sentinel import Sentinel
+
+sentinel = Sentinel([
+ ('user-redis-sentinel-sentinel.juwan.svc.cluster.local', 26379)
+])
+
+# 获取主节点(不指定 username)
+master = sentinel.master_for(
+ 'mymaster',
+ password=os.getenv('REDIS_PASSWORD')
+)
+
+# 或显式指定
+master = sentinel.master_for(
+ 'mymaster',
+ username='default',
+ password=os.getenv('REDIS_PASSWORD')
+)
+```
+
+---
+
+### Java (Spring Data Redis)
+
+#### application.yml(只用密码)
+```yaml
+spring:
+ redis:
+ password: ${REDIS_PASSWORD} # 只需密码
+ sentinel:
+ master: mymaster
+ nodes:
+ - user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+```
+
+#### 显式指定用户名(Spring Boot 2.6+)
+```yaml
+spring:
+ redis:
+ username: default # 可选
+ password: ${REDIS_PASSWORD}
+ sentinel:
+ master: mymaster
+ nodes:
+ - user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
+```
+
+#### Java 代码(Jedis)
+```java
+// 只用密码
+JedisPoolConfig poolConfig = new JedisPoolConfig();
+JedisPool pool = new JedisPool(
+ poolConfig,
+ "user-redis-master.juwan.svc.cluster.local",
+ 6379,
+ 2000,
+ "password" // 只需密码
+);
+
+// 显式指定用户名(Jedis 4.0+)
+JedisPool pool = new JedisPool(
+ poolConfig,
+ "user-redis-master.juwan.svc.cluster.local",
+ 6379,
+ 2000,
+ "default", // 用户名
+ "password" // 密码
+);
+```
+
+---
+
+### Node.js (ioredis)
+
+#### 只用密码
+```javascript
+const Redis = require('ioredis');
+
+// 不指定 username
+const redis = new Redis({
+ host: 'user-redis-master.juwan.svc.cluster.local',
+ port: 6379,
+ password: process.env.REDIS_PASSWORD, // 只需密码
+});
+```
+
+#### 显式指定用户名(ioredis 4.0+)
+```javascript
+// 显式指定 username
+const redis = new Redis({
+ host: 'user-redis-master.juwan.svc.cluster.local',
+ port: 6379,
+ username: 'default', // 明确指定
+ password: process.env.REDIS_PASSWORD,
+});
+```
+
+#### Sentinel 模式
+```javascript
+const redis = new Redis({
+ sentinels: [
+ {
+ host: 'user-redis-sentinel-sentinel.juwan.svc.cluster.local',
+ port: 26379
+ }
+ ],
+ name: 'mymaster',
+ password: process.env.REDIS_PASSWORD, // username 可省略
+});
+```
+
+---
+
+### redis-cli 命令行
+
+#### 只用密码
+```bash
+# 方式 1:命令行参数
+redis-cli -h user-redis-master.juwan.svc.cluster.local -p 6379 -a <password>
+
+# 方式 2:交互式登录
+redis-cli -h user-redis-master.juwan.svc.cluster.local -p 6379
+> AUTH <password>
+OK
+```
+
+#### 显式指定用户名
+```bash
+# 方式 1:命令行参数
+redis-cli -h host -p 6379 --user default --pass <password>
+
+# 方式 2:交互式登录
+redis-cli -h host -p 6379
+> AUTH default <password>
+OK
+```
+
+---
+
+## 🛠️ 管理工具配置
+
+### RedisInsight
+
+**配置示例:**
+```
+Name: juwan-user-redis
+Host: localhost (使用 kubectl port-forward)
+Port: 6379
+Username: default ← 填这个(或留空)
+Password: <从 Secret 获取>
+```
+
+**说明:**
+- ✅ Username 可以填 `default`
+- ✅ Username 也可以留空(某些版本支持)
+- 🔧 建议先 port-forward:`kubectl port-forward -n juwan svc/user-redis-master 6379:6379`
+
+---
+
+### Redis Commander
+
+**Docker 运行:**
+```bash
+docker run -d \
+ -e REDIS_HOSTS=juwan:user-redis-master.juwan.svc.cluster.local:6379:0:password \
+ -p 8081:8081 \
+ rediscommander/redis-commander
+```
+
+**URL 格式:**
+```
+# 不带用户名
+redis://:<password>@host:6379
+
+# 带用户名
+redis://default:<password>@host:6379
+```
+
+---
+
+### Another Redis Desktop Manager
+
+**连接配置:**
+```json
+{
+ "name": "juwan-redis",
+ "host": "localhost",
+ "port": 6379,
+ "auth": "password", // 只需密码
+ "username": "" // 留空(或填 "default")
+}
+```
+
+---
+
+## ❓ 常见问题解答
+
+### Q1: 为什么有些地方说 Redis 不需要用户名?
+
+**A:** 这取决于 Redis 版本:
+
+```
+Redis 5.x 及之前:
+└─ ❌ 不支持用户名,只有全局密码
+ └─ 配置:requirepass password
+
+Redis 6.0+:
+└─ ✅ 支持 ACL 多用户
+ └─ 但保持向后兼容
+ └─ 如果只提供密码,自动使用 'default' 用户
+```
+
+### Q2: 我的 Redis 是哪个版本?
+
+**查看方法:**
+```bash
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- redis-cli -v
+# 或
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- redis-server --version
+```
+
+**你的环境:**
+```
+Redis 7.0.12 (quay.io/opstree/redis:v7.0.12)
+✅ 支持 ACL
+✅ 用户名: default
+```
+
+### Q3: 如何创建自定义用户?
+
+**场景:** 为不同应用创建独立的用户账号
+
+#### 创建只读用户
+```bash
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+  redis-cli -a <password> ACL SETUSER readonly on \
+ '>readonly_password' \
+ ~* \
+ +@read -@write -@dangerous
+```
+
+**验证:**
+```bash
+# 使用新用户登录
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+ redis-cli --user readonly --pass readonly_password GET somekey
+
+# 尝试写操作(应该失败)
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+ redis-cli --user readonly --pass readonly_password SET somekey value
+# (error) NOPERM User has no permissions to run the 'set' command
+```
+
+#### 创建应用专用用户
+```bash
+# 只能访问 app:* 开头的 key
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+  redis-cli -a <password> ACL SETUSER app_user on \
+ '>app_password' \
+ ~app:* \
+ +@all
+```
+
+#### 保存 ACL 配置
+```bash
+# 持久化到配置文件
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+  redis-cli -a <password> ACL SAVE
+```
+
+⚠️ **注意:** 在 Kubernetes 环境中,Pod 重启可能丢失 ACL 配置。建议:
+1. 使用 ConfigMap 存储 ACL 配置
+2. 在 StatefulSet 启动脚本中加载配置
+3. 或使用 Redis Operator 的 ACL 管理功能
+
+### Q4: 如何查看所有用户?
+
+```bash
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+  redis-cli -a <password> ACL LIST
+```
+
+### Q5: 如何重置用户密码?
+
+#### 重置 default 用户密码
+```bash
+# 1. 生成新密码
+NEW_PASSWORD=$(openssl rand -base64 32)
+
+# 2. 在 Redis 中更新
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+  redis-cli -a <old-password> ACL SETUSER default ">$NEW_PASSWORD"
+
+# 3. 更新 Kubernetes Secret
+kubectl create secret generic user-redis \
+ --from-literal=password=$NEW_PASSWORD \
+ --dry-run=client -o yaml | kubectl apply -f -
+
+# 4. 重启应用 Pods(使新密码生效)
+kubectl rollout restart deployment/user-rpc -n juwan
+```
+
+### Q6: 客户端报错 "WRONGPASS invalid username-password pair"
+
+**可能原因:**
+
+1. **密码错误**
+ ```bash
+ # 验证密码
+ kubectl get secret user-redis -n juwan -o jsonpath='{.data.password}' | base64 -d
+ ```
+
+2. **用户名错误**
+ ```bash
+ # 检查用户是否存在
+ kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+   redis-cli -a <password> ACL LIST
+ ```
+
+3. **用户被禁用**
+ ```bash
+ # 启用用户
+ kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+   redis-cli -a <password> ACL SETUSER default on
+ ```
+
+4. **网络连接到了错误的 Redis 实例**
+ ```bash
+ # 确认连接的主机
+ kubectl get svc -n juwan | grep redis
+ ```
+
+### Q7: 在 Kubernetes 中如何安全地传递密码?
+
+**推荐方案:环境变量 + Secret**
+
+```yaml
+# Deployment
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: user-rpc
+spec:
+ template:
+ spec:
+ containers:
+ - name: user-rpc
+ env:
+ - name: REDIS_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: user-redis
+ key: password
+```
+
+**应用代码:**
+```go
+// 从环境变量读取
+password := os.Getenv("REDIS_PASSWORD")
+
+rdb := redis.NewClient(&redis.Options{
+ Addr: "user-redis-master.juwan.svc.cluster.local:6379",
+ Password: password,
+})
+```
+
+**❌ 不推荐:**
+```yaml
+# 不要在配置文件中硬编码密码
+Redis:
+ Password: "hardcoded_password" # ❌ 不安全
+```
+
+---
+
+## 🔒 安全建议
+
+### 1. 密码强度
+
+**检查当前密码强度:**
+```bash
+PASSWORD=$(kubectl get secret user-redis -n juwan -o jsonpath='{.data.password}' | base64 -d)
+echo "密码长度: ${#PASSWORD}"
+```
+
+**推荐:**
+- ✅ 至少 32 字符
+- ✅ 包含大小写字母、数字、特殊字符
+- ✅ 使用密码生成器:`openssl rand -base64 32`
+
+### 2. 权限最小化
+
+**不要所有应用都用 default 超级用户!**
+
+```bash
+# 为不同应用创建独立用户
+# 用户 A:只能读写自己的 key
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+  redis-cli -a <password> ACL SETUSER app_a on \
+ '>password_a' ~app_a:* +@all
+
+# 用户 B:只读
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+  redis-cli -a <password> ACL SETUSER app_b on \
+ '>password_b' ~* +@read
+```
+
+### 3. 禁止危险命令
+
+**即使是 default 用户,也应该限制危险命令:**
+
+```bash
+# 禁止 FLUSHALL, FLUSHDB, KEYS 等命令
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+  redis-cli -a <password> ACL SETUSER default -flushall -flushdb -keys
+
+# 查看限制后的权限
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+  redis-cli -a <password> ACL GETUSER default
+```
+
+### 4. 审计日志
+
+**启用 ACL 日志记录:**
+```bash
+# 查看最近的 ACL 事件
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+  redis-cli -a <password> ACL LOG 10
+
+# 输出示例:
+# 1) reason: auth
+# username: default
+# timestamp: 1234567890
+```
+
+### 5. 定期轮换密码
+
+**建议:每 90 天轮换一次**
+
+```bash
+#!/bin/bash
+# rotate-redis-password.sh
+
+# 1. 读取旧密码,生成新密码
+OLD_PWD=$(kubectl get secret user-redis -n juwan -o jsonpath='{.data.password}' | base64 -d)
+NEW_PWD=$(openssl rand -base64 32)
+# 2. 更新 Redis
+kubectl exec -it user-redis-0 -n juwan -c user-redis -- \
+  redis-cli -a "$OLD_PWD" ACL SETUSER default ">$NEW_PWD"
+
+# 3. 更新 Secret
+kubectl create secret generic user-redis \
+ --from-literal=password="$NEW_PWD" \
+ -n juwan --dry-run=client -o yaml | kubectl apply -f -
+
+# 4. 滚动重启应用
+kubectl rollout restart deployment/user-rpc -n juwan
+
+echo "密码已轮换,新密码: $NEW_PWD"
+```
+
+### 6. 网络隔离
+
+**NetworkPolicy 限制访问:**
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: redis-access-policy
+ namespace: juwan
+spec:
+ podSelector:
+ matchLabels:
+ app: user-redis
+ policyTypes:
+ - Ingress
+ ingress:
+ # 只允许 user-rpc 访问
+ - from:
+ - podSelector:
+ matchLabels:
+ app: user-rpc
+ ports:
+ - protocol: TCP
+ port: 6379
+```
+
+---
+
+## 📝 总结
+
+### 核心发现
+
+| 项目 | 值 |
+|-----|---|
+| 认证模式 | ✅ ACL (Redis 6.0+) |
+| 用户名 | `default` |
+| 密码位置 | Secret `user-redis` in namespace `juwan` |
+| 权限级别 | 超级用户(+@all ~* &*) |
+| 是否必须指定用户名 | ❌ 不必须(客户端默认使用 default) |
+
+### 最佳实践
+
+1. **开发/测试环境**
+ ```yaml
+ # 简单配置即可
+   Password: <redis-password>
+ # Username 省略
+ ```
+
+2. **生产环境(推荐)**
+ ```yaml
+ # 为每个应用创建独立用户
+ Username: app_user
+   Password: <app-password>
+ # 限制权限和 key 范围
+ ```
+
+3. **连接方式**
+ ```
+ ⭐⭐⭐ Sentinel 模式(推荐)
+ - 自动故障转移
+ - 高可用
+ - 只需密码(username 可省略)
+ ```
+
+---
+
+**文档版本:** 1.0
+**创建日期:** 2026年2月22日
+**维护者:** DevOps Team
+**下次审查:** 2026年3月22日
diff --git a/go.mod b/go.mod
index 9183c94..e66ae21 100644
--- a/go.mod
+++ b/go.mod
@@ -9,6 +9,7 @@ require (
)
require (
+ filippo.io/edwards25519 v1.1.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
@@ -23,6 +24,7 @@ require (
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
+ github.com/go-sql-driver/mysql v1.9.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
diff --git a/go.sum b/go.sum
index 8046f95..478f35a 100644
--- a/go.sum
+++ b/go.sum
@@ -1,3 +1,7 @@
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
+github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
github.com/alicebob/miniredis/v2 v2.36.1 h1:Dvc5oAnNOr7BIfPn7tF269U8DvRW1dBG2D5n0WrfYMI=
github.com/alicebob/miniredis/v2 v2.36.1/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -39,6 +43,8 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
+github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
diff --git a/test.js b/test.js
deleted file mode 100644
index f108fb3..0000000
--- a/test.js
+++ /dev/null
@@ -1,27 +0,0 @@
-import { search } from "@inquirer/prompts"
-import Fuse from "fuse.js";
-
-const colors = [
- { title: 'Red', value: 'red' },
- { title: 'Green', value: 'green' },
- { title: 'Blue', value: 'blue' },
- { title: 'Yellow', value: 'yellow' },
-];
-
-(async () => {
- const fuse = new Fuse(colors, {
- keys: ['title'],
- threshold: 0.4,
- })
- const color = await search({
- message: "Pick a color",
- source: async (term) => {
- if (!term) {
- return colors.map(s => s.value);
- }
- const result = fuse.search(term);
- return result.map(s => s.item.value);
- }
- })
- console.log(color); // => { color: 'green' }
-})();