add: envoy redis

This commit is contained in:
wwweww
2026-02-23 15:54:33 +08:00
parent 26864d578e
commit 4898aecd3b
79 changed files with 9520 additions and 650 deletions
+8
View File
@@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
+8
View File
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/st-1-example.iml" filepath="$PROJECT_DIR$/.idea/st-1-example.iml" />
</modules>
</component>
</project>
+9
View File
@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="WEB_MODULE" version="4">
<component name="Go" enabled="true" />
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>
Generated
+6
View File
@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="" vcs="Git" />
</component>
</project>
-25
View File
@@ -1,25 +0,0 @@
// Code generated by goctl. DO NOT EDIT.
// goctl 1.9.2
package handler
import (
"net/http"
"juwan-backend/app/user/api/internal/svc"
"github.com/zeromicro/go-zero/rest"
)
// RegisterHandlers wires the user-api HTTP routes onto the given rest.Server.
// Each route's handler closure captures the shared service context.
func RegisterHandlers(server *rest.Server, serverCtx *svc.ServiceContext) {
	server.AddRoutes(
		[]rest.Route{
			{
				// Get user information by user id.
				Method:  http.MethodPost,
				Path:    "/user/info",
				Handler: userInfoHandler(serverCtx),
			},
		},
	)
}
@@ -1,43 +0,0 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package logic
import (
"context"
"juwan-backend/app/user/api/internal/svc"
"juwan-backend/app/user/api/internal/types"
"juwan-backend/app/user/rpc/usercenter"
"github.com/zeromicro/go-zero/core/logx"
)
// UserInfoLogic serves the /user/info endpoint by delegating the lookup to
// the usercenter RPC service.
type UserInfoLogic struct {
	logx.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

// NewUserInfoLogic builds a UserInfoLogic bound to the request context and
// the shared service context (which holds the Usercenter RPC client).
func NewUserInfoLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UserInfoLogic {
	return &UserInfoLogic{
		Logger: logx.WithContext(ctx),
		ctx:    ctx,
		svcCtx: svcCtx,
	}
}

// UserInfo gets user information by user id: it calls the usercenter RPC and
// maps the RPC response onto the HTTP response type.
func (l *UserInfoLogic) UserInfo(req *types.UserInfoReq) (resp *types.UserInfoResp, err error) {
	logx.Infof("Request user info, user id: %d", req.UserId)
	res, err := l.svcCtx.Usercenter.GetUserInfo(l.ctx, &usercenter.GetUserInfoReq{
		Id: req.UserId,
	})
	if err != nil {
		// Propagate the RPC error unchanged; the HTTP handler converts it.
		return nil, err
	}
	return &types.UserInfoResp{
		UserId:   res.Id,
		Nickname: res.Nickname,
	}, nil
}
@@ -1,23 +0,0 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package svc
import (
"juwan-backend/app/user/api/internal/config"
"juwan-backend/app/user/rpc/usercenter"
"github.com/zeromicro/go-zero/zrpc"
)
// ServiceContext carries the dependencies shared by all API handlers:
// the loaded configuration and the usercenter RPC client.
type ServiceContext struct {
	Config     config.Config
	Usercenter usercenter.Usercenter
}

// NewServiceContext builds the service context from the loaded config.
// MustNewClient panics if the RPC client cannot be created, aborting
// startup early on misconfiguration.
func NewServiceContext(c config.Config) *ServiceContext {
	return &ServiceContext{
		Config:     c,
		Usercenter: usercenter.NewUsercenter(zrpc.MustNewClient(c.UsercenterRpcConf)),
	}
}
-13
View File
@@ -1,13 +0,0 @@
// Code generated by goctl. DO NOT EDIT.
// goctl 1.9.2
package types
// UserInfoReq is the request body for POST /user/info.
type UserInfoReq struct {
	UserId int64 `json:"userId"`
}

// UserInfoResp is the response body for POST /user/info.
type UserInfoResp struct {
	UserId   int64  `json:"userId"`
	Nickname string `json:"nickname"`
}
-6
View File
@@ -1,6 +0,0 @@
Name: pb.rpc
ListenOn: 0.0.0.0:9001
# UserDB: "${DB_URI}?sslmode=disable"
DB:
UserDB: "${DB_URI}?sslmode=disable"
-10
View File
@@ -1,10 +0,0 @@
package config
import "github.com/zeromicro/go-zero/zrpc"
// Config is the user-rpc server configuration: the standard go-zero RPC
// server options plus the database connection settings.
type Config struct {
	zrpc.RpcServerConf
	DB struct {
		UserDB string // user database DSN (e.g. "${DB_URI}?sslmode=disable" per the YAML config)
	}
}
@@ -1,41 +0,0 @@
package logic
import (
"context"
"juwan-backend/app/user/rpc/internal/svc"
"juwan-backend/app/user/rpc/pb"
"github.com/zeromicro/go-zero/core/logx"
)
// GetUserInfoLogic implements the usercenter GetUserInfo RPC.
type GetUserInfoLogic struct {
	ctx    context.Context
	svcCtx *svc.ServiceContext
	logx.Logger
}

// NewGetUserInfoLogic builds a GetUserInfoLogic bound to the call context.
func NewGetUserInfoLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetUserInfoLogic {
	return &GetUserInfoLogic{
		ctx:    ctx,
		svcCtx: svcCtx,
		Logger: logx.WithContext(ctx),
	}
}

// GetUserInfo resolves a user's nickname by id from an in-memory stub table.
// Unknown ids fall back to the "Unknown" nickname rather than returning an error.
func (l *GetUserInfoLogic) GetUserInfo(in *pb.GetUserInfoReq) (*pb.GetUserInfoResp, error) {
	// Stub data until a real store is wired in.
	users := map[int64]string{
		1: "WangHuahua",
		2: "LiKunkun",
	}
	// Fix: the local was misspelled "nikename" and the fallback value was
	// the misspelled "Unknow"; both corrected to the intended spelling.
	nickname := "Unknown"
	if name, ok := users[in.Id]; ok {
		nickname = name
	}
	return &pb.GetUserInfoResp{
		Id:       in.Id,
		Nickname: nickname,
	}, nil
}
@@ -1,29 +0,0 @@
// Code generated by goctl. DO NOT EDIT.
// goctl 1.9.2
// Source: user.proto
package server
import (
"context"
"juwan-backend/app/user/rpc/internal/logic"
"juwan-backend/app/user/rpc/internal/svc"
"juwan-backend/app/user/rpc/pb"
)
// UsercenterServer is the gRPC server implementation of the Usercenter
// service; each RPC is forwarded to its corresponding logic type.
type UsercenterServer struct {
	svcCtx *svc.ServiceContext
	pb.UnimplementedUsercenterServer
}

// NewUsercenterServer builds the server with the shared service context.
func NewUsercenterServer(svcCtx *svc.ServiceContext) *UsercenterServer {
	return &UsercenterServer{
		svcCtx: svcCtx,
	}
}

// GetUserInfo delegates the RPC call to GetUserInfoLogic.
func (s *UsercenterServer) GetUserInfo(ctx context.Context, in *pb.GetUserInfoReq) (*pb.GetUserInfoResp, error) {
	l := logic.NewGetUserInfoLogic(ctx, s.svcCtx)
	return l.GetUserInfo(in)
}
@@ -1,13 +0,0 @@
package svc
import "juwan-backend/app/user/rpc/internal/config"
type ServiceContext struct {
Config config.Config
}
func NewServiceContext(c config.Config) *ServiceContext {
return &ServiceContext{
Config: c,
}
}
-184
View File
@@ -1,184 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.9
// protoc v6.32.0
// source: user.proto
package pb
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type GetUserInfoReq struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GetUserInfoReq) Reset() {
*x = GetUserInfoReq{}
mi := &file_user_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GetUserInfoReq) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetUserInfoReq) ProtoMessage() {}
func (x *GetUserInfoReq) ProtoReflect() protoreflect.Message {
mi := &file_user_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetUserInfoReq.ProtoReflect.Descriptor instead.
func (*GetUserInfoReq) Descriptor() ([]byte, []int) {
return file_user_proto_rawDescGZIP(), []int{0}
}
func (x *GetUserInfoReq) GetId() int64 {
if x != nil {
return x.Id
}
return 0
}
type GetUserInfoResp struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Nickname string `protobuf:"bytes,2,opt,name=nickname,proto3" json:"nickname,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GetUserInfoResp) Reset() {
*x = GetUserInfoResp{}
mi := &file_user_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GetUserInfoResp) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetUserInfoResp) ProtoMessage() {}
func (x *GetUserInfoResp) ProtoReflect() protoreflect.Message {
mi := &file_user_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetUserInfoResp.ProtoReflect.Descriptor instead.
func (*GetUserInfoResp) Descriptor() ([]byte, []int) {
return file_user_proto_rawDescGZIP(), []int{1}
}
func (x *GetUserInfoResp) GetId() int64 {
if x != nil {
return x.Id
}
return 0
}
func (x *GetUserInfoResp) GetNickname() string {
if x != nil {
return x.Nickname
}
return ""
}
var File_user_proto protoreflect.FileDescriptor
const file_user_proto_rawDesc = "" +
"\n" +
"\n" +
"user.proto\x12\x02pb\" \n" +
"\x0eGetUserInfoReq\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x03R\x02id\"=\n" +
"\x0fGetUserInfoResp\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x03R\x02id\x12\x1a\n" +
"\bnickname\x18\x02 \x01(\tR\bnickname2D\n" +
"\n" +
"Usercenter\x126\n" +
"\vGetUserInfo\x12\x12.pb.GetUserInfoReq\x1a\x13.pb.GetUserInfoRespB\x06Z\x04./pbb\x06proto3"
var (
file_user_proto_rawDescOnce sync.Once
file_user_proto_rawDescData []byte
)
func file_user_proto_rawDescGZIP() []byte {
file_user_proto_rawDescOnce.Do(func() {
file_user_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_user_proto_rawDesc), len(file_user_proto_rawDesc)))
})
return file_user_proto_rawDescData
}
var file_user_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_user_proto_goTypes = []any{
(*GetUserInfoReq)(nil), // 0: pb.GetUserInfoReq
(*GetUserInfoResp)(nil), // 1: pb.GetUserInfoResp
}
var file_user_proto_depIdxs = []int32{
0, // 0: pb.Usercenter.GetUserInfo:input_type -> pb.GetUserInfoReq
1, // 1: pb.Usercenter.GetUserInfo:output_type -> pb.GetUserInfoResp
1, // [1:2] is the sub-list for method output_type
0, // [0:1] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_user_proto_init() }
func file_user_proto_init() {
if File_user_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_user_proto_rawDesc), len(file_user_proto_rawDesc)),
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_user_proto_goTypes,
DependencyIndexes: file_user_proto_depIdxs,
MessageInfos: file_user_proto_msgTypes,
}.Build()
File_user_proto = out.File
file_user_proto_goTypes = nil
file_user_proto_depIdxs = nil
}
-121
View File
@@ -1,121 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v6.32.0
// source: user.proto
package pb
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
Usercenter_GetUserInfo_FullMethodName = "/pb.Usercenter/GetUserInfo"
)
// UsercenterClient is the client API for Usercenter service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type UsercenterClient interface {
GetUserInfo(ctx context.Context, in *GetUserInfoReq, opts ...grpc.CallOption) (*GetUserInfoResp, error)
}
type usercenterClient struct {
cc grpc.ClientConnInterface
}
func NewUsercenterClient(cc grpc.ClientConnInterface) UsercenterClient {
return &usercenterClient{cc}
}
func (c *usercenterClient) GetUserInfo(ctx context.Context, in *GetUserInfoReq, opts ...grpc.CallOption) (*GetUserInfoResp, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetUserInfoResp)
err := c.cc.Invoke(ctx, Usercenter_GetUserInfo_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// UsercenterServer is the server API for Usercenter service.
// All implementations must embed UnimplementedUsercenterServer
// for forward compatibility.
type UsercenterServer interface {
GetUserInfo(context.Context, *GetUserInfoReq) (*GetUserInfoResp, error)
mustEmbedUnimplementedUsercenterServer()
}
// UnimplementedUsercenterServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedUsercenterServer struct{}
func (UnimplementedUsercenterServer) GetUserInfo(context.Context, *GetUserInfoReq) (*GetUserInfoResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetUserInfo not implemented")
}
func (UnimplementedUsercenterServer) mustEmbedUnimplementedUsercenterServer() {}
func (UnimplementedUsercenterServer) testEmbeddedByValue() {}
// UnsafeUsercenterServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to UsercenterServer will
// result in compilation errors.
type UnsafeUsercenterServer interface {
mustEmbedUnimplementedUsercenterServer()
}
// RegisterUsercenterServer registers srv's Usercenter service implementation
// with the gRPC service registrar.
func RegisterUsercenterServer(s grpc.ServiceRegistrar, srv UsercenterServer) {
	// If the following call panics, it indicates UnimplementedUsercenterServer was
	// embedded by pointer and is nil. This will cause panics if an
	// unimplemented method is ever invoked, so we test this at initialization
	// time to prevent it from happening at runtime later due to I/O.
	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
		t.testEmbeddedByValue()
	}
	s.RegisterService(&Usercenter_ServiceDesc, srv)
}
func _Usercenter_GetUserInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetUserInfoReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(UsercenterServer).GetUserInfo(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Usercenter_GetUserInfo_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(UsercenterServer).GetUserInfo(ctx, req.(*GetUserInfoReq))
}
return interceptor(ctx, in, info, handler)
}
// Usercenter_ServiceDesc is the grpc.ServiceDesc for Usercenter service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Usercenter_ServiceDesc = grpc.ServiceDesc{
ServiceName: "pb.Usercenter",
HandlerType: (*UsercenterServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetUserInfo",
Handler: _Usercenter_GetUserInfo_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "user.proto",
}
-38
View File
@@ -1,38 +0,0 @@
// Code generated by goctl. DO NOT EDIT.
// goctl 1.9.2
// Source: user.proto
package usercenter
import (
"context"
"juwan-backend/app/user/rpc/pb"
"github.com/zeromicro/go-zero/zrpc"
"google.golang.org/grpc"
)
type (
GetUserInfoReq = pb.GetUserInfoReq
GetUserInfoResp = pb.GetUserInfoResp
Usercenter interface {
GetUserInfo(ctx context.Context, in *GetUserInfoReq, opts ...grpc.CallOption) (*GetUserInfoResp, error)
}
defaultUsercenter struct {
cli zrpc.Client
}
)
func NewUsercenter(cli zrpc.Client) Usercenter {
return &defaultUsercenter{
cli: cli,
}
}
func (m *defaultUsercenter) GetUserInfo(ctx context.Context, in *GetUserInfoReq, opts ...grpc.CallOption) (*GetUserInfoResp, error) {
client := pb.NewUsercenterClient(m.cli.Conn())
return client.GetUserInfo(ctx, in, opts...)
}
@@ -2,5 +2,10 @@ Name: user-api
Host: 0.0.0.0
Port: 8888
Prometheus:
Host: 0.0.0.0
Port: 4001
Path: /metrics
UsercenterRpcConf:
Target: k8s://juwan/user-rpc-svc:9001
+60
View File
@@ -0,0 +1,60 @@
// Code generated by goctl. DO NOT EDIT.
// goctl 1.9.2
package handler
import (
"net/http"
user "juwan-backend/app/users/api/internal/handler/user"
"juwan-backend/app/users/api/internal/svc"
"github.com/zeromicro/go-zero/rest"
)
func RegisterHandlers(server *rest.Server, serverCtx *svc.ServiceContext) {
server.AddRoutes(
rest.WithMiddlewares(
[]rest.Middleware{serverCtx.Logger},
[]rest.Route{
{
// 获取用户信息
Method: http.MethodGet,
Path: "/:userId",
Handler: user.GetUserInfoHandler(serverCtx),
},
{
// 修改用户信息
Method: http.MethodPut,
Path: "/:userId",
Handler: user.UpdateUserInfoHandler(serverCtx),
},
{
// 用户登出
Method: http.MethodPost,
Path: "/:userId/logout",
Handler: user.LogoutHandler(serverCtx),
},
{
// 修改用户密码
Method: http.MethodPut,
Path: "/:userId/password",
Handler: user.UpdatePasswordHandler(serverCtx),
},
{
// 用户登录接口
Method: http.MethodPost,
Path: "/login",
Handler: user.LoginHandler(serverCtx),
},
{
// 用户注册接口
Method: http.MethodPost,
Path: "/register",
Handler: user.RegisterHandler(serverCtx),
},
}...,
),
rest.WithPrefix("/api/users"),
)
}
@@ -0,0 +1,32 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package user
import (
"net/http"
"github.com/zeromicro/go-zero/rest/httpx"
"juwan-backend/app/users/api/internal/logic/user"
"juwan-backend/app/users/api/internal/svc"
"juwan-backend/app/users/api/internal/types"
)
// 获取用户信息
func GetUserInfoHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var req types.GetUserInfoReq
if err := httpx.Parse(r, &req); err != nil {
httpx.ErrorCtx(r.Context(), w, err)
return
}
l := user.NewGetUserInfoLogic(r.Context(), svcCtx)
resp, err := l.GetUserInfo(&req)
if err != nil {
httpx.ErrorCtx(r.Context(), w, err)
} else {
httpx.OkJsonCtx(r.Context(), w, resp)
}
}
}
@@ -0,0 +1,32 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package user
import (
"net/http"
"github.com/zeromicro/go-zero/rest/httpx"
"juwan-backend/app/users/api/internal/logic/user"
"juwan-backend/app/users/api/internal/svc"
"juwan-backend/app/users/api/internal/types"
)
// 用户登录接口
func LoginHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var req types.LoginReq
if err := httpx.Parse(r, &req); err != nil {
httpx.ErrorCtx(r.Context(), w, err)
return
}
l := user.NewLoginLogic(r.Context(), svcCtx)
resp, err := l.Login(&req)
if err != nil {
httpx.ErrorCtx(r.Context(), w, err)
} else {
httpx.OkJsonCtx(r.Context(), w, resp)
}
}
}
@@ -0,0 +1,32 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package user
import (
"net/http"
"github.com/zeromicro/go-zero/rest/httpx"
"juwan-backend/app/users/api/internal/logic/user"
"juwan-backend/app/users/api/internal/svc"
"juwan-backend/app/users/api/internal/types"
)
// 用户登出
func LogoutHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var req types.LogoutReq
if err := httpx.Parse(r, &req); err != nil {
httpx.ErrorCtx(r.Context(), w, err)
return
}
l := user.NewLogoutLogic(r.Context(), svcCtx)
resp, err := l.Logout(&req)
if err != nil {
httpx.ErrorCtx(r.Context(), w, err)
} else {
httpx.OkJsonCtx(r.Context(), w, resp)
}
}
}
@@ -1,28 +1,28 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package handler
package user
import (
"net/http"
"github.com/zeromicro/go-zero/rest/httpx"
"juwan-backend/app/user/api/internal/logic"
"juwan-backend/app/user/api/internal/svc"
"juwan-backend/app/user/api/internal/types"
"juwan-backend/app/users/api/internal/logic/user"
"juwan-backend/app/users/api/internal/svc"
"juwan-backend/app/users/api/internal/types"
)
// Get user infomaction by user id
func userInfoHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
// 用户注册接口
func RegisterHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var req types.UserInfoReq
var req types.RegisterReq
if err := httpx.Parse(r, &req); err != nil {
httpx.ErrorCtx(r.Context(), w, err)
return
}
l := logic.NewUserInfoLogic(r.Context(), svcCtx)
resp, err := l.UserInfo(&req)
l := user.NewRegisterLogic(r.Context(), svcCtx)
resp, err := l.Register(&req)
if err != nil {
httpx.ErrorCtx(r.Context(), w, err)
} else {
@@ -0,0 +1,32 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package user
import (
"net/http"
"github.com/zeromicro/go-zero/rest/httpx"
"juwan-backend/app/users/api/internal/logic/user"
"juwan-backend/app/users/api/internal/svc"
"juwan-backend/app/users/api/internal/types"
)
// 修改用户密码
func UpdatePasswordHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var req types.UpdatePasswordReq
if err := httpx.Parse(r, &req); err != nil {
httpx.ErrorCtx(r.Context(), w, err)
return
}
l := user.NewUpdatePasswordLogic(r.Context(), svcCtx)
resp, err := l.UpdatePassword(&req)
if err != nil {
httpx.ErrorCtx(r.Context(), w, err)
} else {
httpx.OkJsonCtx(r.Context(), w, resp)
}
}
}
@@ -0,0 +1,32 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package user
import (
"net/http"
"github.com/zeromicro/go-zero/rest/httpx"
"juwan-backend/app/users/api/internal/logic/user"
"juwan-backend/app/users/api/internal/svc"
"juwan-backend/app/users/api/internal/types"
)
// 修改用户信息
func UpdateUserInfoHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var req types.UpdateUserInfoReq
if err := httpx.Parse(r, &req); err != nil {
httpx.ErrorCtx(r.Context(), w, err)
return
}
l := user.NewUpdateUserInfoLogic(r.Context(), svcCtx)
resp, err := l.UpdateUserInfo(&req)
if err != nil {
httpx.ErrorCtx(r.Context(), w, err)
} else {
httpx.OkJsonCtx(r.Context(), w, resp)
}
}
}
@@ -0,0 +1,34 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package user
import (
"context"
"juwan-backend/app/users/api/internal/svc"
"juwan-backend/app/users/api/internal/types"
"github.com/zeromicro/go-zero/core/logx"
)
type GetUserInfoLogic struct {
logx.Logger
ctx context.Context
svcCtx *svc.ServiceContext
}
// 获取用户信息
func NewGetUserInfoLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetUserInfoLogic {
return &GetUserInfoLogic{
Logger: logx.WithContext(ctx),
ctx: ctx,
svcCtx: svcCtx,
}
}
func (l *GetUserInfoLogic) GetUserInfo(req *types.GetUserInfoReq) (resp *types.UserInfo, err error) {
// todo: add your logic here and delete this line
return
}
@@ -0,0 +1,34 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package user
import (
"context"
"juwan-backend/app/users/api/internal/svc"
"juwan-backend/app/users/api/internal/types"
"github.com/zeromicro/go-zero/core/logx"
)
type LoginLogic struct {
logx.Logger
ctx context.Context
svcCtx *svc.ServiceContext
}
// 用户登录接口
func NewLoginLogic(ctx context.Context, svcCtx *svc.ServiceContext) *LoginLogic {
return &LoginLogic{
Logger: logx.WithContext(ctx),
ctx: ctx,
svcCtx: svcCtx,
}
}
func (l *LoginLogic) Login(req *types.LoginReq) (resp *types.LoginResp, err error) {
// todo: add your logic here and delete this line
return
}
@@ -0,0 +1,34 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package user
import (
"context"
"juwan-backend/app/users/api/internal/svc"
"juwan-backend/app/users/api/internal/types"
"github.com/zeromicro/go-zero/core/logx"
)
type LogoutLogic struct {
logx.Logger
ctx context.Context
svcCtx *svc.ServiceContext
}
// 用户登出
func NewLogoutLogic(ctx context.Context, svcCtx *svc.ServiceContext) *LogoutLogic {
return &LogoutLogic{
Logger: logx.WithContext(ctx),
ctx: ctx,
svcCtx: svcCtx,
}
}
func (l *LogoutLogic) Logout(req *types.LogoutReq) (resp *types.LogoutResp, err error) {
// todo: add your logic here and delete this line
return
}
@@ -0,0 +1,55 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package user
import (
"context"
"errors"
"juwan-backend/app/users/api/internal/svc"
"juwan-backend/app/users/api/internal/types"
"juwan-backend/app/users/rpc/pb"
"github.com/google/uuid"
"github.com/zeromicro/go-zero/core/logx"
)
type RegisterLogic struct {
logx.Logger
ctx context.Context
svcCtx *svc.ServiceContext
}
// 用户注册接口
func NewRegisterLogic(ctx context.Context, svcCtx *svc.ServiceContext) *RegisterLogic {
return &RegisterLogic{
Logger: logx.WithContext(ctx),
ctx: ctx,
svcCtx: svcCtx,
}
}
// Register creates a new user account: it rejects the request if the
// username is already taken, generates a UUID for the new user, then
// persists it via the users RPC.
//
// NOTE(review): a non-nil error from GetUserByUsername is treated here as
// "user not found", so a transport failure is indistinguishable from
// absence — confirm the RPC contract.
func (l *RegisterLogic) Register(req *types.RegisterReq) (resp *types.RegisterResp, err error) {
	user, err := l.svcCtx.UserRpc.GetUserByUsername(l.ctx, &pb.GetUserByUsernameReq{
		Username: req.Username,
	})
	if err == nil || user != nil {
		// A successful lookup means the username is already taken.
		return nil, errors.New("user already exists")
	}

	id, err := uuid.NewRandom()
	if err != nil {
		return nil, errors.New("register failed")
	}

	if _, err = l.svcCtx.UserRpc.AddUsers(l.ctx, &pb.AddUsersReq{
		UserId:   id.String(),
		Username: req.Username,
		Passwd:   req.Password,
		Phone:    req.Phone,
		State:    true,
	}); err != nil {
		return nil, err
	}

	// Fix: the original ended with a naked return, so a successful
	// registration produced (nil, nil) and the handler serialized a null
	// body. Return a populated response instead. (Error-string typos
	// "User is exisit" / "Register is failed" also corrected.)
	return &types.RegisterResp{
		Username: req.Username,
		Email:    req.Email,
		Message:  "register success",
	}, nil
}
@@ -0,0 +1,34 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package user
import (
"context"
"juwan-backend/app/users/api/internal/svc"
"juwan-backend/app/users/api/internal/types"
"github.com/zeromicro/go-zero/core/logx"
)
type UpdatePasswordLogic struct {
logx.Logger
ctx context.Context
svcCtx *svc.ServiceContext
}
// 修改用户密码
func NewUpdatePasswordLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UpdatePasswordLogic {
return &UpdatePasswordLogic{
Logger: logx.WithContext(ctx),
ctx: ctx,
svcCtx: svcCtx,
}
}
func (l *UpdatePasswordLogic) UpdatePassword(req *types.UpdatePasswordReq) (resp *types.UpdatePasswordResp, err error) {
// todo: add your logic here and delete this line
return
}
@@ -0,0 +1,34 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package user
import (
"context"
"juwan-backend/app/users/api/internal/svc"
"juwan-backend/app/users/api/internal/types"
"github.com/zeromicro/go-zero/core/logx"
)
type UpdateUserInfoLogic struct {
logx.Logger
ctx context.Context
svcCtx *svc.ServiceContext
}
// 修改用户信息
func NewUpdateUserInfoLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UpdateUserInfoLogic {
return &UpdateUserInfoLogic{
Logger: logx.WithContext(ctx),
ctx: ctx,
svcCtx: svcCtx,
}
}
func (l *UpdateUserInfoLogic) UpdateUserInfo(req *types.UpdateUserInfoReq) (resp *types.UpdateUserInfoResp, err error) {
// todo: add your logic here and delete this line
return
}
@@ -0,0 +1,22 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package middleware
import "net/http"
type LoggerMiddleware struct {
}
func NewLoggerMiddleware() *LoggerMiddleware {
return &LoggerMiddleware{}
}
func (m *LoggerMiddleware) Handle(next http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// TODO generate middleware implement function, delete after code implementation
// Passthrough to next handler if need
next(w, r)
}
}
@@ -0,0 +1,27 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package svc
import (
"juwan-backend/app/users/api/internal/config"
"juwan-backend/app/users/api/internal/middleware"
"juwan-backend/app/users/rpc/usercenter"
"github.com/zeromicro/go-zero/rest"
"github.com/zeromicro/go-zero/zrpc"
)
type ServiceContext struct {
Config config.Config
Logger rest.Middleware
UserRpc usercenter.Usercenter
}
func NewServiceContext(c config.Config) *ServiceContext {
return &ServiceContext{
Config: c,
Logger: middleware.NewLoggerMiddleware().Handle,
UserRpc: usercenter.NewUsercenter(zrpc.MustNewClient(c.UsercenterRpcConf)),
}
}
+81
View File
@@ -0,0 +1,81 @@
// Code generated by goctl. DO NOT EDIT.
// goctl 1.9.2
package types
type ErrorResp struct {
Code int `json:"code"`
Message string `json:"message"`
}
type GetUserInfoReq struct {
UserId int64 `path:"userId" binding:"required,gt=0"`
}
type LoginReq struct {
Username string `json:"username" binding:"required"`
Password string `json:"password" binding:"required"`
}
type LoginResp struct {
UserId int64 `json:"userId"`
Username string `json:"username"`
Email string `json:"email"`
Token string `json:"token"`
Expires int64 `json:"expires"`
}
type LogoutReq struct {
UserId int64 `path:"userId" binding:"required,gt=0"`
}
type LogoutResp struct {
Message string `json:"message"`
}
type RegisterReq struct {
Username string `json:"username" binding:"required,min=3,max=50"`
Password string `json:"password" binding:"required,min=6,max=128"`
Email string `json:"email,omitempty" binding:"omitempty,email"`
Phone string `json:"phone,omitempty" binding:"omitempty,len=11"`
}
type RegisterResp struct {
UserId int64 `json:"userId"`
Username string `json:"username"`
Email string `json:"email"`
Message string `json:"message"`
}
type UpdatePasswordReq struct {
UserId int64 `path:"userId" binding:"required,gt=0"`
OldPassword string `json:"oldPassword" binding:"required"`
NewPassword string `json:"newPassword" binding:"required,min=6,max=128"`
}
type UpdatePasswordResp struct {
Message string `json:"message"`
}
type UpdateUserInfoReq struct {
UserId int64 `path:"userId" binding:"required,gt=0"`
Email string `json:"email" binding:"omitempty,email"`
Phone string `json:"phone" binding:"omitempty,len=11"`
Avatar string `json:"avatar" binding:"omitempty,url"`
}
type UpdateUserInfoResp struct {
UserId int64 `json:"userId"`
Message string `json:"message"`
}
type UserInfo struct {
UserId int64 `json:"userId"`
Username string `json:"username"`
Email string `json:"email"`
Phone string `json:"phone"`
Avatar string `json:"avatar"`
Status int `json:"status"`
CreateAt int64 `json:"createAt"`
UpdateAt int64 `json:"updateAt"`
}
@@ -7,9 +7,9 @@ import (
"flag"
"fmt"
"juwan-backend/app/user/api/internal/config"
"juwan-backend/app/user/api/internal/handler"
"juwan-backend/app/user/api/internal/svc"
"juwan-backend/app/users/api/internal/config"
"juwan-backend/app/users/api/internal/handler"
"juwan-backend/app/users/api/internal/svc"
"github.com/zeromicro/go-zero/core/conf"
"github.com/zeromicro/go-zero/rest"
+15
View File
@@ -0,0 +1,15 @@
Name: pb.rpc
ListenOn: 0.0.0.0:9001
Prometheus:
Host: 0.0.0.0
# NOTE(review): 9001 clashes with ListenOn (0.0.0.0:9001); the metrics
# endpoint must listen on a separate port (the API service uses 4001).
Port: 9001
Path: /metrics
DataSource: "${DB_URI}?sslmode=disable"
CacheConf:
- Host: "${REDIS_HOST}"
Type: cluster
Pass: "${REDIS_PASSWORD}"
User: "default"
+12
View File
@@ -0,0 +1,12 @@
package config
import (
"github.com/zeromicro/go-zero/core/stores/cache"
"github.com/zeromicro/go-zero/zrpc"
)
type Config struct {
zrpc.RpcServerConf
DataSource string `json:"dataSource"`
CacheConf cache.CacheConf
}
@@ -0,0 +1,31 @@
package logic
import (
"context"
"juwan-backend/app/users/rpc/internal/svc"
"juwan-backend/app/users/rpc/pb"
"github.com/zeromicro/go-zero/core/logx"
)
type AddUsersLogic struct {
ctx context.Context
svcCtx *svc.ServiceContext
logx.Logger
}
func NewAddUsersLogic(ctx context.Context, svcCtx *svc.ServiceContext) *AddUsersLogic {
return &AddUsersLogic{
ctx: ctx,
svcCtx: svcCtx,
Logger: logx.WithContext(ctx),
}
}
// -----------------------users-----------------------
func (l *AddUsersLogic) AddUsers(in *pb.AddUsersReq) (*pb.AddUsersResp, error) {
// todo: add your logic here and delete this line
return &pb.AddUsersResp{}, nil
}
@@ -0,0 +1,30 @@
package logic
import (
"context"
"juwan-backend/app/users/rpc/internal/svc"
"juwan-backend/app/users/rpc/pb"
"github.com/zeromicro/go-zero/core/logx"
)
type DelUsersLogic struct {
ctx context.Context
svcCtx *svc.ServiceContext
logx.Logger
}
func NewDelUsersLogic(ctx context.Context, svcCtx *svc.ServiceContext) *DelUsersLogic {
return &DelUsersLogic{
ctx: ctx,
svcCtx: svcCtx,
Logger: logx.WithContext(ctx),
}
}
func (l *DelUsersLogic) DelUsers(in *pb.DelUsersReq) (*pb.DelUsersResp, error) {
// todo: add your logic here and delete this line
return &pb.DelUsersResp{}, nil
}
@@ -0,0 +1,30 @@
package logic
import (
"context"
"juwan-backend/app/users/rpc/internal/svc"
"juwan-backend/app/users/rpc/pb"
"github.com/zeromicro/go-zero/core/logx"
)
// GetUserByUsernameLogic implements the users RPC GetUserByUsername method.
type GetUserByUsernameLogic struct {
	ctx    context.Context
	svcCtx *svc.ServiceContext
	logx.Logger
}

// NewGetUserByUsernameLogic builds the logic bound to the call context.
func NewGetUserByUsernameLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetUserByUsernameLogic {
	return &GetUserByUsernameLogic{
		ctx:    ctx,
		svcCtx: svcCtx,
		Logger: logx.WithContext(ctx),
	}
}

// GetUserByUsername looks a user up by username. Currently a stub that
// returns an empty response.
//
// NOTE(review): the signature uses GetUsersByIdReq/GetUsersByIdResp, but the
// API-side caller builds a pb.GetUserByUsernameReq — these message types look
// mismatched with the method name; confirm against user.proto.
func (l *GetUserByUsernameLogic) GetUserByUsername(in *pb.GetUsersByIdReq) (*pb.GetUsersByIdResp, error) {
	// todo: add your logic here and delete this line
	return &pb.GetUsersByIdResp{}, nil
}
@@ -0,0 +1,30 @@
package logic
import (
"context"
"juwan-backend/app/users/rpc/internal/svc"
"juwan-backend/app/users/rpc/pb"
"github.com/zeromicro/go-zero/core/logx"
)
type GetUsersByIdLogic struct {
ctx context.Context
svcCtx *svc.ServiceContext
logx.Logger
}
func NewGetUsersByIdLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetUsersByIdLogic {
return &GetUsersByIdLogic{
ctx: ctx,
svcCtx: svcCtx,
Logger: logx.WithContext(ctx),
}
}
func (l *GetUsersByIdLogic) GetUsersById(in *pb.GetUsersByIdReq) (*pb.GetUsersByIdResp, error) {
// todo: add your logic here and delete this line
return &pb.GetUsersByIdResp{}, nil
}
@@ -0,0 +1,30 @@
package logic
import (
"context"
"juwan-backend/app/users/rpc/internal/svc"
"juwan-backend/app/users/rpc/pb"
"github.com/zeromicro/go-zero/core/logx"
)
type SearchUsersLogic struct {
ctx context.Context
svcCtx *svc.ServiceContext
logx.Logger
}
func NewSearchUsersLogic(ctx context.Context, svcCtx *svc.ServiceContext) *SearchUsersLogic {
return &SearchUsersLogic{
ctx: ctx,
svcCtx: svcCtx,
Logger: logx.WithContext(ctx),
}
}
func (l *SearchUsersLogic) SearchUsers(in *pb.SearchUsersReq) (*pb.SearchUsersResp, error) {
// todo: add your logic here and delete this line
return &pb.SearchUsersResp{}, nil
}
@@ -0,0 +1,30 @@
package logic
import (
"context"
"juwan-backend/app/users/rpc/internal/svc"
"juwan-backend/app/users/rpc/pb"
"github.com/zeromicro/go-zero/core/logx"
)
type UpdateUsersLogic struct {
ctx context.Context
svcCtx *svc.ServiceContext
logx.Logger
}
func NewUpdateUsersLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UpdateUsersLogic {
return &UpdateUsersLogic{
ctx: ctx,
svcCtx: svcCtx,
Logger: logx.WithContext(ctx),
}
}
func (l *UpdateUsersLogic) UpdateUsers(in *pb.UpdateUsersReq) (*pb.UpdateUsersResp, error) {
// todo: add your logic here and delete this line
return &pb.UpdateUsersResp{}, nil
}
@@ -0,0 +1,27 @@
package models
import (
"github.com/zeromicro/go-zero/core/stores/cache"
"github.com/zeromicro/go-zero/core/stores/sqlx"
)
// Compile-time check that customUsersModel satisfies UsersModel.
var _ UsersModel = (*customUsersModel)(nil)

type (
	// UsersModel is an interface to be customized, add more methods here,
	// and implement the added methods in customUsersModel.
	UsersModel interface {
		usersModel
	}

	// customUsersModel embeds the goctl-generated default model so that
	// hand-written methods can be layered on top of the generated CRUD.
	customUsersModel struct {
		*defaultUsersModel
	}
)

// NewUsersModel returns a model for the database table.
func NewUsersModel(conn sqlx.SqlConn, c cache.CacheConf, opts ...cache.Option) UsersModel {
	return &customUsersModel{
		defaultUsersModel: newUsersModel(conn, c, opts...),
	}
}
@@ -0,0 +1,180 @@
// Code generated by goctl. DO NOT EDIT.
// versions:
// goctl version: 1.9.2
package models
import (
"context"
"database/sql"
"fmt"
"strings"
"time"
"github.com/zeromicro/go-zero/core/stores/builder"
"github.com/zeromicro/go-zero/core/stores/cache"
"github.com/zeromicro/go-zero/core/stores/sqlc"
"github.com/zeromicro/go-zero/core/stores/sqlx"
"github.com/zeromicro/go-zero/core/stringx"
)
var (
	// usersFieldNames lists every column of the users table (quoted for postgres).
	usersFieldNames = builder.RawFieldNames(&Users{}, true)
	// usersRows is the comma-joined column list used by SELECT statements.
	usersRows = strings.Join(usersFieldNames, ",")
	// usersRowsExpectAutoSet drops the timestamp columns the database fills
	// itself; it is the INSERT column list.
	usersRowsExpectAutoSet = strings.Join(stringx.Remove(usersFieldNames, "create_at", "create_time", "created_at", "update_at", "update_time", "updated_at"), ",")
	// usersRowsWithPlaceHolder is the "col = $n" assignment list for UPDATE,
	// excluding the primary key and the auto-set timestamps.
	usersRowsWithPlaceHolder = builder.PostgreSqlJoin(stringx.Remove(usersFieldNames, "user_id", "create_at", "create_time", "created_at", "update_at", "update_time", "updated_at"))

	// Cache key prefixes: the row is cached under its primary key, and the
	// two unique indexes (phone, username) are cached as pointers to it.
	cachePublicUsersUserIdPrefix   = "cache:public:users:userId:"
	cachePublicUsersPhonePrefix    = "cache:public:users:phone:"
	cachePublicUsersUsernamePrefix = "cache:public:users:username:"
)

type (
	// usersModel is the generated CRUD contract implemented by defaultUsersModel.
	usersModel interface {
		Insert(ctx context.Context, data *Users) (sql.Result, error)
		FindOne(ctx context.Context, userId string) (*Users, error)
		FindOneByPhone(ctx context.Context, phone string) (*Users, error)
		FindOneByUsername(ctx context.Context, username string) (*Users, error)
		Update(ctx context.Context, data *Users) error
		Delete(ctx context.Context, userId string) error
	}

	// defaultUsersModel is the cache-backed implementation generated by goctl.
	defaultUsersModel struct {
		sqlc.CachedConn
		table string
	}

	// Users mirrors one row of "public"."users".
	Users struct {
		UserId     string       `db:"user_id"`
		Username   string       `db:"username"`
		Passwd     string       `db:"passwd"`
		Nickname   string       `db:"nickname"`
		Phone      string       `db:"phone"`
		RoleType   int64        `db:"role_type"`
		IsVerified bool         `db:"is_verified"`
		State      bool         `db:"state"`
		CreatedAt  time.Time    `db:"created_at"`
		UpdatedAt  time.Time    `db:"updated_at"`
		DeletedAt  sql.NullTime `db:"deleted_at"` // soft-delete marker; NULL while the row is active
	}
)
// newUsersModel constructs the generated model over the given connection and
// cache configuration; the table name is pre-quoted for postgres.
func newUsersModel(conn sqlx.SqlConn, c cache.CacheConf, opts ...cache.Option) *defaultUsersModel {
	return &defaultUsersModel{
		CachedConn: sqlc.NewConn(conn, c, opts...),
		table:      `"public"."users"`,
	}
}
// Delete removes the row for userId and invalidates all three cache keys
// (primary key plus the phone and username index keys).
func (m *defaultUsersModel) Delete(ctx context.Context, userId string) error {
	// The row must be fetched first so the phone/username index keys can be
	// computed for cache invalidation.
	data, err := m.FindOne(ctx, userId)
	if err != nil {
		return err
	}

	publicUsersPhoneKey := fmt.Sprintf("%s%v", cachePublicUsersPhonePrefix, data.Phone)
	publicUsersUserIdKey := fmt.Sprintf("%s%v", cachePublicUsersUserIdPrefix, userId)
	publicUsersUsernameKey := fmt.Sprintf("%s%v", cachePublicUsersUsernamePrefix, data.Username)
	_, err = m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) {
		query := fmt.Sprintf("delete from %s where user_id = $1", m.table)
		return conn.ExecCtx(ctx, query, userId)
	}, publicUsersPhoneKey, publicUsersUserIdKey, publicUsersUsernameKey)
	return err
}
// FindOne returns the row for userId, reading through the primary-key cache.
// Returns ErrNotFound when no matching row exists.
func (m *defaultUsersModel) FindOne(ctx context.Context, userId string) (*Users, error) {
	publicUsersUserIdKey := fmt.Sprintf("%s%v", cachePublicUsersUserIdPrefix, userId)
	var resp Users
	err := m.QueryRowCtx(ctx, &resp, publicUsersUserIdKey, func(ctx context.Context, conn sqlx.SqlConn, v any) error {
		query := fmt.Sprintf("select %s from %s where user_id = $1 limit 1", usersRows, m.table)
		return conn.QueryRowCtx(ctx, v, query, userId)
	})
	switch err {
	case nil:
		return &resp, nil
	case sqlc.ErrNotFound:
		// Translate the cache layer's sentinel into the model's sentinel.
		return nil, ErrNotFound
	default:
		return nil, err
	}
}
// FindOneByPhone looks a row up via the unique phone index. The index cache
// stores only the primary key; the row itself is resolved through
// queryPrimary. Returns ErrNotFound when no matching row exists.
func (m *defaultUsersModel) FindOneByPhone(ctx context.Context, phone string) (*Users, error) {
	publicUsersPhoneKey := fmt.Sprintf("%s%v", cachePublicUsersPhonePrefix, phone)
	var resp Users
	err := m.QueryRowIndexCtx(ctx, &resp, publicUsersPhoneKey, m.formatPrimary, func(ctx context.Context, conn sqlx.SqlConn, v any) (i any, e error) {
		query := fmt.Sprintf("select %s from %s where phone = $1 limit 1", usersRows, m.table)
		if err := conn.QueryRowCtx(ctx, &resp, query, phone); err != nil {
			return nil, err
		}
		// Return the primary key so the cache can map phone -> user_id.
		return resp.UserId, nil
	}, m.queryPrimary)
	switch err {
	case nil:
		return &resp, nil
	case sqlc.ErrNotFound:
		return nil, ErrNotFound
	default:
		return nil, err
	}
}
// FindOneByUsername looks a row up via the unique username index. The index
// cache stores only the primary key; the row itself is resolved through
// queryPrimary. Returns ErrNotFound when no matching row exists.
func (m *defaultUsersModel) FindOneByUsername(ctx context.Context, username string) (*Users, error) {
	publicUsersUsernameKey := fmt.Sprintf("%s%v", cachePublicUsersUsernamePrefix, username)
	var resp Users
	err := m.QueryRowIndexCtx(ctx, &resp, publicUsersUsernameKey, m.formatPrimary, func(ctx context.Context, conn sqlx.SqlConn, v any) (i any, e error) {
		query := fmt.Sprintf("select %s from %s where username = $1 limit 1", usersRows, m.table)
		if err := conn.QueryRowCtx(ctx, &resp, query, username); err != nil {
			return nil, err
		}
		// Return the primary key so the cache can map username -> user_id.
		return resp.UserId, nil
	}, m.queryPrimary)
	switch err {
	case nil:
		return &resp, nil
	case sqlc.ErrNotFound:
		return nil, ErrNotFound
	default:
		return nil, err
	}
}
// Insert writes a new row and invalidates the cache keys that could now be
// stale (primary key plus both unique-index keys). Placeholders $1-$9 line
// up with usersRowsExpectAutoSet: created_at/updated_at are database-filled.
func (m *defaultUsersModel) Insert(ctx context.Context, data *Users) (sql.Result, error) {
	publicUsersPhoneKey := fmt.Sprintf("%s%v", cachePublicUsersPhonePrefix, data.Phone)
	publicUsersUserIdKey := fmt.Sprintf("%s%v", cachePublicUsersUserIdPrefix, data.UserId)
	publicUsersUsernameKey := fmt.Sprintf("%s%v", cachePublicUsersUsernamePrefix, data.Username)
	ret, err := m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) {
		query := fmt.Sprintf("insert into %s (%s) values ($1, $2, $3, $4, $5, $6, $7, $8, $9)", m.table, usersRowsExpectAutoSet)
		return conn.ExecCtx(ctx, query, data.UserId, data.Username, data.Passwd, data.Nickname, data.Phone, data.RoleType, data.IsVerified, data.State, data.DeletedAt)
	}, publicUsersPhoneKey, publicUsersUserIdKey, publicUsersUsernameKey)
	return ret, err
}
// Update overwrites every non-auto column of the row keyed by newData.UserId.
// Cache keys are derived from the OLD row (fetched first) because phone or
// username may be changing and their previous index entries must be evicted.
func (m *defaultUsersModel) Update(ctx context.Context, newData *Users) error {
	data, err := m.FindOne(ctx, newData.UserId)
	if err != nil {
		return err
	}

	publicUsersPhoneKey := fmt.Sprintf("%s%v", cachePublicUsersPhonePrefix, data.Phone)
	publicUsersUserIdKey := fmt.Sprintf("%s%v", cachePublicUsersUserIdPrefix, data.UserId)
	publicUsersUsernameKey := fmt.Sprintf("%s%v", cachePublicUsersUsernamePrefix, data.Username)
	_, err = m.ExecCtx(ctx, func(ctx context.Context, conn sqlx.SqlConn) (result sql.Result, err error) {
		query := fmt.Sprintf("update %s set %s where user_id = $1", m.table, usersRowsWithPlaceHolder)
		return conn.ExecCtx(ctx, query, newData.UserId, newData.Username, newData.Passwd, newData.Nickname, newData.Phone, newData.RoleType, newData.IsVerified, newData.State, newData.DeletedAt)
	}, publicUsersPhoneKey, publicUsersUserIdKey, publicUsersUsernameKey)
	return err
}
// formatPrimary renders the primary-key cache key for a user id.
func (m *defaultUsersModel) formatPrimary(primary any) string {
	return fmt.Sprintf("%s%v", cachePublicUsersUserIdPrefix, primary)
}

// queryPrimary loads a row by primary key on behalf of the index-cache path.
func (m *defaultUsersModel) queryPrimary(ctx context.Context, conn sqlx.SqlConn, v, primary any) error {
	query := fmt.Sprintf("select %s from %s where user_id = $1 limit 1", usersRows, m.table)
	return conn.QueryRowCtx(ctx, v, query, primary)
}

// tableName reports the fully-qualified, quoted table name.
func (m *defaultUsersModel) tableName() string {
	return m.table
}
+5
View File
@@ -0,0 +1,5 @@
package models
import "github.com/zeromicro/go-zero/core/stores/sqlx"
var ErrNotFound = sqlx.ErrNotFound
@@ -0,0 +1,55 @@
// Code generated by goctl. DO NOT EDIT.
// goctl 1.9.2
// Source: users.proto
package server
import (
"context"
"juwan-backend/app/users/rpc/internal/logic"
"juwan-backend/app/users/rpc/internal/svc"
"juwan-backend/app/users/rpc/pb"
)
// UsercenterServer adapts the pb.UsercenterServer gRPC interface onto the
// per-RPC logic types, threading the shared service context through.
type UsercenterServer struct {
	svcCtx *svc.ServiceContext
	pb.UnimplementedUsercenterServer
}

// NewUsercenterServer returns a server bound to the given service context.
func NewUsercenterServer(svcCtx *svc.ServiceContext) *UsercenterServer {
	return &UsercenterServer{
		svcCtx: svcCtx,
	}
}
// -----------------------users-----------------------

// AddUsers forwards the RPC to a request-scoped AddUsersLogic.
func (s *UsercenterServer) AddUsers(ctx context.Context, in *pb.AddUsersReq) (*pb.AddUsersResp, error) {
	l := logic.NewAddUsersLogic(ctx, s.svcCtx)
	return l.AddUsers(in)
}

// UpdateUsers forwards the RPC to a request-scoped UpdateUsersLogic.
func (s *UsercenterServer) UpdateUsers(ctx context.Context, in *pb.UpdateUsersReq) (*pb.UpdateUsersResp, error) {
	l := logic.NewUpdateUsersLogic(ctx, s.svcCtx)
	return l.UpdateUsers(in)
}

// DelUsers forwards the RPC to a request-scoped DelUsersLogic.
func (s *UsercenterServer) DelUsers(ctx context.Context, in *pb.DelUsersReq) (*pb.DelUsersResp, error) {
	l := logic.NewDelUsersLogic(ctx, s.svcCtx)
	return l.DelUsers(in)
}

// GetUsersById forwards the RPC to a request-scoped GetUsersByIdLogic.
func (s *UsercenterServer) GetUsersById(ctx context.Context, in *pb.GetUsersByIdReq) (*pb.GetUsersByIdResp, error) {
	l := logic.NewGetUsersByIdLogic(ctx, s.svcCtx)
	return l.GetUsersById(in)
}

// GetUserByUsername forwards the RPC to a request-scoped GetUserByUsernameLogic.
func (s *UsercenterServer) GetUserByUsername(ctx context.Context, in *pb.GetUserByUsernameReq) (*pb.GetUserByUsernameResp, error) {
	l := logic.NewGetUserByUsernameLogic(ctx, s.svcCtx)
	return l.GetUserByUsername(in)
}

// SearchUsers forwards the RPC to a request-scoped SearchUsersLogic.
func (s *UsercenterServer) SearchUsers(ctx context.Context, in *pb.SearchUsersReq) (*pb.SearchUsersResp, error) {
	l := logic.NewSearchUsersLogic(ctx, s.svcCtx)
	return l.SearchUsers(in)
}
@@ -0,0 +1,47 @@
package svc
import (
"context"
"juwan-backend/app/users/rpc/internal/config"
"juwan-backend/app/users/rpc/internal/models"
"time"
"github.com/redis/go-redis/v9"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/stores/sqlx"
)
// ServiceContext aggregates the shared dependencies of the users RPC
// service: configuration, the cached users model, and an optional Redis
// cluster client used by hand-written code.
type ServiceContext struct {
	Config       config.Config
	UsersModel   models.UsersModel
	RedisCluster *redis.ClusterClient
}

// NewServiceContext wires up the service dependencies from configuration.
// RedisCluster stays nil when no cache nodes are configured.
func NewServiceContext(c config.Config) *ServiceContext {
	// sqlx.NewSqlConn is lazy: it does not dial until first use, so no
	// connectivity check is possible (or meaningful) at this point.
	conn := sqlx.NewSqlConn("postgres", c.DataSource)
	logx.Infof("postgres connection configured (connects lazily on first use)")

	// Initialize a Redis Cluster client from CacheConf.
	var redisCluster *redis.ClusterClient
	if len(c.CacheConf) > 0 {
		// Fix: seed the cluster client with EVERY configured node, not just
		// CacheConf[0], so startup survives a single seed node being down.
		addrs := make([]string, 0, len(c.CacheConf))
		for _, node := range c.CacheConf {
			addrs = append(addrs, node.Host)
		}
		redisCluster = redis.NewClusterClient(&redis.ClusterOptions{
			Addrs:    addrs,
			Password: c.CacheConf[0].Pass,
		})

		// Best-effort connectivity probe; failure is logged, not fatal.
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		if err := redisCluster.Ping(ctx).Err(); err != nil {
			logx.Errorf("connecting to redis cluster: %v", err)
		} else {
			logx.Infof("connected to redis cluster")
		}
	}

	return &ServiceContext{
		Config:       c,
		UsersModel:   models.NewUsersModel(conn, c.CacheConf),
		RedisCluster: redisCluster,
	}
}
+30
View File
@@ -0,0 +1,30 @@
package utils
import (
"errors"
"time"
)
// TokenPayload is the claim set carried by an issued token.
type TokenPayload struct {
	UserId  string // subject user id
	IsAdmin bool   // whether the subject has admin privileges
}

const (
	// tokenCachePrefixUser prefixes cache keys mapping a user id to a token.
	tokenCachePrefixUser = "jwt:user:"
	// tokenCachePrefixToken prefixes cache keys mapping a token to its payload.
	tokenCachePrefixToken = "jwt:token:"
	// tokenCacheTTL is how long cached token entries live (60 days).
	tokenCacheTTL = 60 * 24 * time.Hour
	// tokenLifetime is the validity window of a newly issued token (5 days).
	tokenLifetime = 5 * 24 * time.Hour
)

// Sentinel errors for token lookup/validation paths.
var (
	errMissingToken    = errors.New("token missing in request")
	errInvalidToken    = errors.New("invalid token claims")
	errTokenNotInCache = errors.New("token not found in cache")
	errNoRedisClient   = errors.New("redis client not configured")
)

// NewToken issues a token for payload.
// NOTE(review): placeholder — always returns an empty token and nil error;
// signing and cache registration still need to be implemented.
func NewToken(payload TokenPayload) (string, error) {
	return "", nil
}
}
+5 -5
View File
@@ -4,10 +4,10 @@ import (
"flag"
"fmt"
"juwan-backend/app/user/rpc/internal/config"
"juwan-backend/app/user/rpc/internal/server"
"juwan-backend/app/user/rpc/internal/svc"
"juwan-backend/app/user/rpc/pb"
"juwan-backend/app/users/rpc/internal/config"
"juwan-backend/app/users/rpc/internal/server"
"juwan-backend/app/users/rpc/internal/svc"
"juwan-backend/app/users/rpc/pb"
"github.com/zeromicro/go-zero/core/conf"
"github.com/zeromicro/go-zero/core/service"
@@ -22,7 +22,7 @@ func main() {
flag.Parse()
var c config.Config
conf.MustLoad(*configFile, &c)
conf.MustLoad(*configFile, &c, conf.UseEnv())
ctx := svc.NewServiceContext(c)
s := zrpc.MustNewServer(c.RpcServerConf, func(grpcServer *grpc.Server) {
File diff suppressed because it is too large Load Diff
+313
View File
@@ -0,0 +1,313 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v6.32.0
// source: users.proto
package pb
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
Usercenter_AddUsers_FullMethodName = "/pb.usercenter/AddUsers"
Usercenter_UpdateUsers_FullMethodName = "/pb.usercenter/UpdateUsers"
Usercenter_DelUsers_FullMethodName = "/pb.usercenter/DelUsers"
Usercenter_GetUsersById_FullMethodName = "/pb.usercenter/GetUsersById"
Usercenter_GetUserByUsername_FullMethodName = "/pb.usercenter/GetUserByUsername"
Usercenter_SearchUsers_FullMethodName = "/pb.usercenter/SearchUsers"
)
// UsercenterClient is the client API for Usercenter service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type UsercenterClient interface {
// -----------------------users-----------------------
AddUsers(ctx context.Context, in *AddUsersReq, opts ...grpc.CallOption) (*AddUsersResp, error)
UpdateUsers(ctx context.Context, in *UpdateUsersReq, opts ...grpc.CallOption) (*UpdateUsersResp, error)
DelUsers(ctx context.Context, in *DelUsersReq, opts ...grpc.CallOption) (*DelUsersResp, error)
GetUsersById(ctx context.Context, in *GetUsersByIdReq, opts ...grpc.CallOption) (*GetUsersByIdResp, error)
GetUserByUsername(ctx context.Context, in *GetUserByUsernameReq, opts ...grpc.CallOption) (*GetUserByUsernameResp, error)
SearchUsers(ctx context.Context, in *SearchUsersReq, opts ...grpc.CallOption) (*SearchUsersResp, error)
}
type usercenterClient struct {
cc grpc.ClientConnInterface
}
func NewUsercenterClient(cc grpc.ClientConnInterface) UsercenterClient {
return &usercenterClient{cc}
}
func (c *usercenterClient) AddUsers(ctx context.Context, in *AddUsersReq, opts ...grpc.CallOption) (*AddUsersResp, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(AddUsersResp)
err := c.cc.Invoke(ctx, Usercenter_AddUsers_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *usercenterClient) UpdateUsers(ctx context.Context, in *UpdateUsersReq, opts ...grpc.CallOption) (*UpdateUsersResp, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(UpdateUsersResp)
err := c.cc.Invoke(ctx, Usercenter_UpdateUsers_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *usercenterClient) DelUsers(ctx context.Context, in *DelUsersReq, opts ...grpc.CallOption) (*DelUsersResp, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DelUsersResp)
err := c.cc.Invoke(ctx, Usercenter_DelUsers_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *usercenterClient) GetUsersById(ctx context.Context, in *GetUsersByIdReq, opts ...grpc.CallOption) (*GetUsersByIdResp, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetUsersByIdResp)
err := c.cc.Invoke(ctx, Usercenter_GetUsersById_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *usercenterClient) GetUserByUsername(ctx context.Context, in *GetUserByUsernameReq, opts ...grpc.CallOption) (*GetUserByUsernameResp, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetUserByUsernameResp)
err := c.cc.Invoke(ctx, Usercenter_GetUserByUsername_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *usercenterClient) SearchUsers(ctx context.Context, in *SearchUsersReq, opts ...grpc.CallOption) (*SearchUsersResp, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(SearchUsersResp)
err := c.cc.Invoke(ctx, Usercenter_SearchUsers_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// UsercenterServer is the server API for Usercenter service.
// All implementations must embed UnimplementedUsercenterServer
// for forward compatibility.
type UsercenterServer interface {
// -----------------------users-----------------------
AddUsers(context.Context, *AddUsersReq) (*AddUsersResp, error)
UpdateUsers(context.Context, *UpdateUsersReq) (*UpdateUsersResp, error)
DelUsers(context.Context, *DelUsersReq) (*DelUsersResp, error)
GetUsersById(context.Context, *GetUsersByIdReq) (*GetUsersByIdResp, error)
GetUserByUsername(context.Context, *GetUserByUsernameReq) (*GetUserByUsernameResp, error)
SearchUsers(context.Context, *SearchUsersReq) (*SearchUsersResp, error)
mustEmbedUnimplementedUsercenterServer()
}
// UnimplementedUsercenterServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedUsercenterServer struct{}
func (UnimplementedUsercenterServer) AddUsers(context.Context, *AddUsersReq) (*AddUsersResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method AddUsers not implemented")
}
func (UnimplementedUsercenterServer) UpdateUsers(context.Context, *UpdateUsersReq) (*UpdateUsersResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateUsers not implemented")
}
func (UnimplementedUsercenterServer) DelUsers(context.Context, *DelUsersReq) (*DelUsersResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method DelUsers not implemented")
}
func (UnimplementedUsercenterServer) GetUsersById(context.Context, *GetUsersByIdReq) (*GetUsersByIdResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetUsersById not implemented")
}
func (UnimplementedUsercenterServer) GetUserByUsername(context.Context, *GetUserByUsernameReq) (*GetUserByUsernameResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetUserByUsername not implemented")
}
func (UnimplementedUsercenterServer) SearchUsers(context.Context, *SearchUsersReq) (*SearchUsersResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method SearchUsers not implemented")
}
func (UnimplementedUsercenterServer) mustEmbedUnimplementedUsercenterServer() {}
func (UnimplementedUsercenterServer) testEmbeddedByValue() {}
// UnsafeUsercenterServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to UsercenterServer will
// result in compilation errors.
type UnsafeUsercenterServer interface {
mustEmbedUnimplementedUsercenterServer()
}
func RegisterUsercenterServer(s grpc.ServiceRegistrar, srv UsercenterServer) {
	// If the following call panics, it indicates UnimplementedUsercenterServer was
	// embedded by pointer and is nil. This will cause panics if an
	// unimplemented method is ever invoked, so we test this at initialization
	// time to prevent it from happening at runtime later due to I/O.
	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
		t.testEmbeddedByValue()
	}
	s.RegisterService(&Usercenter_ServiceDesc, srv)
}
func _Usercenter_AddUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AddUsersReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(UsercenterServer).AddUsers(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Usercenter_AddUsers_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(UsercenterServer).AddUsers(ctx, req.(*AddUsersReq))
}
return interceptor(ctx, in, info, handler)
}
func _Usercenter_UpdateUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateUsersReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(UsercenterServer).UpdateUsers(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Usercenter_UpdateUsers_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(UsercenterServer).UpdateUsers(ctx, req.(*UpdateUsersReq))
}
return interceptor(ctx, in, info, handler)
}
func _Usercenter_DelUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DelUsersReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(UsercenterServer).DelUsers(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Usercenter_DelUsers_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(UsercenterServer).DelUsers(ctx, req.(*DelUsersReq))
}
return interceptor(ctx, in, info, handler)
}
func _Usercenter_GetUsersById_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetUsersByIdReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(UsercenterServer).GetUsersById(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Usercenter_GetUsersById_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(UsercenterServer).GetUsersById(ctx, req.(*GetUsersByIdReq))
}
return interceptor(ctx, in, info, handler)
}
func _Usercenter_GetUserByUsername_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetUserByUsernameReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(UsercenterServer).GetUserByUsername(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Usercenter_GetUserByUsername_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(UsercenterServer).GetUserByUsername(ctx, req.(*GetUserByUsernameReq))
}
return interceptor(ctx, in, info, handler)
}
func _Usercenter_SearchUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SearchUsersReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(UsercenterServer).SearchUsers(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Usercenter_SearchUsers_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(UsercenterServer).SearchUsers(ctx, req.(*SearchUsersReq))
}
return interceptor(ctx, in, info, handler)
}
// Usercenter_ServiceDesc is the grpc.ServiceDesc for Usercenter service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Usercenter_ServiceDesc = grpc.ServiceDesc{
ServiceName: "pb.usercenter",
HandlerType: (*UsercenterServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "AddUsers",
Handler: _Usercenter_AddUsers_Handler,
},
{
MethodName: "UpdateUsers",
Handler: _Usercenter_UpdateUsers_Handler,
},
{
MethodName: "DelUsers",
Handler: _Usercenter_DelUsers_Handler,
},
{
MethodName: "GetUsersById",
Handler: _Usercenter_GetUsersById_Handler,
},
{
MethodName: "GetUserByUsername",
Handler: _Usercenter_GetUserByUsername_Handler,
},
{
MethodName: "SearchUsers",
Handler: _Usercenter_SearchUsers_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "users.proto",
}
+81
View File
@@ -0,0 +1,81 @@
// Code generated by goctl. DO NOT EDIT.
// goctl 1.9.2
// Source: users.proto
package usercenter
import (
"context"
"juwan-backend/app/users/rpc/pb"
"github.com/zeromicro/go-zero/zrpc"
"google.golang.org/grpc"
)
type (
AddUsersReq = pb.AddUsersReq
AddUsersResp = pb.AddUsersResp
DelUsersReq = pb.DelUsersReq
DelUsersResp = pb.DelUsersResp
GetUserByUsernameReq = pb.GetUserByUsernameReq
GetUserByUsernameResp = pb.GetUserByUsernameResp
GetUsersByIdReq = pb.GetUsersByIdReq
GetUsersByIdResp = pb.GetUsersByIdResp
SearchUsersReq = pb.SearchUsersReq
SearchUsersResp = pb.SearchUsersResp
UpdateUsersReq = pb.UpdateUsersReq
UpdateUsersResp = pb.UpdateUsersResp
Users = pb.Users
Usercenter interface {
// -----------------------users-----------------------
AddUsers(ctx context.Context, in *AddUsersReq, opts ...grpc.CallOption) (*AddUsersResp, error)
UpdateUsers(ctx context.Context, in *UpdateUsersReq, opts ...grpc.CallOption) (*UpdateUsersResp, error)
DelUsers(ctx context.Context, in *DelUsersReq, opts ...grpc.CallOption) (*DelUsersResp, error)
GetUsersById(ctx context.Context, in *GetUsersByIdReq, opts ...grpc.CallOption) (*GetUsersByIdResp, error)
GetUserByUsername(ctx context.Context, in *GetUserByUsernameReq, opts ...grpc.CallOption) (*GetUserByUsernameResp, error)
SearchUsers(ctx context.Context, in *SearchUsersReq, opts ...grpc.CallOption) (*SearchUsersResp, error)
}
defaultUsercenter struct {
cli zrpc.Client
}
)
func NewUsercenter(cli zrpc.Client) Usercenter {
return &defaultUsercenter{
cli: cli,
}
}
// -----------------------users-----------------------
func (m *defaultUsercenter) AddUsers(ctx context.Context, in *AddUsersReq, opts ...grpc.CallOption) (*AddUsersResp, error) {
client := pb.NewUsercenterClient(m.cli.Conn())
return client.AddUsers(ctx, in, opts...)
}
func (m *defaultUsercenter) UpdateUsers(ctx context.Context, in *UpdateUsersReq, opts ...grpc.CallOption) (*UpdateUsersResp, error) {
client := pb.NewUsercenterClient(m.cli.Conn())
return client.UpdateUsers(ctx, in, opts...)
}
func (m *defaultUsercenter) DelUsers(ctx context.Context, in *DelUsersReq, opts ...grpc.CallOption) (*DelUsersResp, error) {
client := pb.NewUsercenterClient(m.cli.Conn())
return client.DelUsers(ctx, in, opts...)
}
func (m *defaultUsercenter) GetUsersById(ctx context.Context, in *GetUsersByIdReq, opts ...grpc.CallOption) (*GetUsersByIdResp, error) {
client := pb.NewUsercenterClient(m.cli.Conn())
return client.GetUsersById(ctx, in, opts...)
}
func (m *defaultUsercenter) GetUserByUsername(ctx context.Context, in *GetUserByUsernameReq, opts ...grpc.CallOption) (*GetUserByUsernameResp, error) {
client := pb.NewUsercenterClient(m.cli.Conn())
return client.GetUserByUsername(ctx, in, opts...)
}
func (m *defaultUsercenter) SearchUsers(ctx context.Context, in *SearchUsersReq, opts ...grpc.CallOption) (*SearchUsersResp, error) {
client := pb.NewUsercenterClient(m.cli.Conn())
return client.SearchUsers(ctx, in, opts...)
}
+233
View File
@@ -0,0 +1,233 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: user-rpc
namespace: juwan
labels:
app: user-rpc
spec:
replicas: 3
revisionHistoryLimit: 5
selector:
matchLabels:
app: user-rpc
template:
metadata:
labels:
app: user-rpc
spec:
serviceAccountName: find-endpoints
      initContainers: # init container that waits for the database to become ready; negligible resource cost, though it does affect pod scheduling
- name: wait-for-db
image: busybox:1.36
command:
[
"sh",
"-c",
'until nc -z -v -w5 user-db-rw 5432; do echo "Waiting for database..."; sleep 2; done;',
]
containers:
- name: user-rpc
image: user-rpc:v1
ports:
- containerPort: 9001
- containerPort: 4001
env:
- name: DB_URI
valueFrom:
secretKeyRef:
name: user-db-app
key: uri
- name: REDIS_HOST
value: "user-redis-sentinel-sentinel.juwan:26379"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: user-redis
key: password
readinessProbe:
tcpSocket:
port: 9001
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
tcpSocket:
port: 9001
initialDelaySeconds: 15
periodSeconds: 20
resources:
requests:
cpu: 500m
memory: 512Mi
limits:
cpu: 1000m
memory: 1024Mi
volumeMounts:
- name: timezone
mountPath: /etc/localtime
volumes:
- name: timezone
hostPath:
path: /usr/share/zoneinfo/Asia/Shanghai
---
apiVersion: v1
kind: Service
metadata:
name: user-rpc-svc
namespace: juwan
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "4001"
prometheus.io/path: "/metrics"
spec:
ports:
- name: rpc
port: 9001
targetPort: 9001
- name: metrics
port: 4001
targetPort: 4001
selector:
app: user-rpc
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: user-rpc-hpa-c
namespace: juwan
labels:
app: user-rpc-hpa-c
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: user-rpc
minReplicas: 3
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: user-rpc-hpa-m
namespace: juwan
labels:
app: user-rpc-hpa-m
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: user-rpc
minReplicas: 3
maxReplicas: 10
metrics:
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: 80
---
# Redis master-replica replication
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: user-redis
namespace: juwan
spec:
clusterSize: 3
kubernetesConfig:
image: quay.io/opstree/redis:v7.0.12
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
redisSecret:
name: user-redis
key: password
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:latest
imagePullPolicy: Always
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
storage:
volumeClaimTemplate:
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
---
# Sentinel monitoring
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisSentinel
metadata:
name: user-redis-sentinel
namespace: juwan
spec:
clusterSize: 3
kubernetesConfig:
image: quay.io/opstree/redis-sentinel:v7.0.12
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
redisSentinelConfig:
redisReplicationName: user-redis
masterGroupName: mymaster
redisPort: "6379"
quorum: "2"
downAfterMilliseconds: "5000"
failoverTimeout: "10000"
parallelSyncs: "1"
---
# PostgreSQL cluster
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
namespace: juwan
name: user-db
spec:
instances: 3
backup:
barmanObjectStore:
destinationPath: s3://juwan-dev-pg-backups-zj/pg-data/
endpointURL: https://cn-nb1.rains3.com
      s3Credentials:
        # NOTE(review): these `key` values look like actual credential
        # material rather than key *names* inside the `rc-creds` Secret.
        # If so, they are leaked in version control and must be rotated,
        # and replaced here with real key names (e.g. ACCESS_KEY_ID,
        # SECRET_ACCESS_KEY).
        accessKeyId:
          name: rc-creds
          key: SOucqRaJr4OyfcIu
        secretAccessKey:
          name: rc-creds
          key: tn2Agj9EowMwuPA9y7TdSL0AXKsMEz
wal:
compression: gzip
storage:
size: 1Gi
monitoring:
enablePodMonitor: true
+157
View File
@@ -0,0 +1,157 @@
# Envoy gateway bootstrap. The `envoy.yaml` key below is mounted into the
# gateway pod as /etc/envoy/envoy.yaml (see the envoy-gateway Deployment).
# Topology: one HTTP listener on :8080 -> Lua CSRF filter -> router ->
# single upstream cluster (user-api on port 8888).
#
# NOTE(review): the Lua filter seeds math.random with os.time(), which is
# low-entropy and identical across workers started in the same second, so
# CSRF tokens are guessable — consider a stronger entropy source.
# The Set-Cookie it emits also lacks Secure/HttpOnly attributes.
# (Comments cannot be placed inside the block scalar below without becoming
# part of the config data, so they live here instead.)
apiVersion: v1
kind: ConfigMap
metadata:
  name: envoy-config
  namespace: juwan
data:
  envoy.yaml: |
    static_resources:
      listeners:
      - name: http_listener
        address:
          socket_address:
            address: 0.0.0.0
            port_value: 8080
        filter_chains:
        - filters:
          - name: envoy.filters.network.http_connection_manager
            typed_config:
              "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
              stat_prefix: ingress_http
              codec_type: AUTO
              route_config:
                name: local_route
                virtual_hosts:
                - name: gozero_services
                  domains: ["*"]
                  routes:
                  - match:
                      prefix: "/"
                    route:
                      cluster: user-api
              http_filters:
              - name: envoy.filters.http.lua
                typed_config:
                  "@type": type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua
                  inline_code: |
                    math.randomseed(os.time())
                    local function is_safe(method)
                      return method == "GET" or method == "HEAD" or method == "OPTIONS"
                    end
                    local function parse_cookie(cookie_header)
                      if not cookie_header then
                        return nil
                      end
                      for cookie in string.gmatch(cookie_header, "([^;]+)") do
                        local k, v = cookie:match("^%s*([^=]+)=?(.*)$")
                        if k == "csrf_token" then
                          return v
                        end
                      end
                      return nil
                    end
                    local function random_token()
                      local t = {}
                      for i = 1, 32 do
                        t[i] = string.format("%x", math.random(0, 15))
                      end
                      return table.concat(t)
                    end
                    function envoy_on_request(request_handle)
                      local headers = request_handle:headers()
                      local method = headers:get(":method") or ""
                      local cookie = parse_cookie(headers:get("cookie"))
                      local csrf_header = headers:get("x-csrf-token")
                      if is_safe(method) then
                        if not cookie then
                          local token = random_token()
                          request_handle:streamInfo():dynamicMetadata():set("csrf", "token", token)
                        end
                        return
                      end
                      if not cookie or not csrf_header or cookie ~= csrf_header then
                        request_handle:respond({[":status"] = "403"}, "CSRF validation failed")
                        return
                      end
                    end
                    function envoy_on_response(response_handle)
                      local md = response_handle:streamInfo():dynamicMetadata():get("csrf") or {}
                      local token = md["token"]
                      if token then
                        response_handle:headers():add("set-cookie", "csrf_token=" .. token .. "; Path=/; SameSite=Strict")
                      end
                    end
              - name: envoy.filters.http.router
                typed_config:
                  "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
      clusters:
      - name: user-api
        connect_timeout: 2s
        type: STRICT_DNS
        lb_policy: ROUND_ROBIN
        load_assignment:
          cluster_name: user-api
          endpoints:
          - lb_endpoints:
            - endpoint:
                address:
                  socket_address:
                    address: user-api-svc.juwan.svc.cluster.local
                    port_value: 8888
---
# Envoy gateway Deployment: runs the config from the envoy-config ConfigMap.
# Config changes require a pod restart to take effect (no hot reload here).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: envoy-gateway
  namespace: juwan
  labels:
    app: envoy-gateway
spec:
  replicas: 1  # single replica; scale out for HA
  selector:
    matchLabels:
      app: envoy-gateway
  template:
    metadata:
      labels:
        app: envoy-gateway
    spec:
      containers:
        - name: envoy
          image: envoyproxy/envoy:v1.32.2
          args:
            - "-c"
            - "/etc/envoy/envoy.yaml"
            - "--log-level"
            - "info"
          ports:
            - name: http
              containerPort: 8080
          volumeMounts:
            - name: config
              mountPath: /etc/envoy
      volumes:
        - name: config
          configMap:
            name: envoy-config
---
# Cluster-internal entry point for the gateway: Service port 80 forwards to
# the Envoy listener on container port 8080.
apiVersion: v1
kind: Service
metadata:
  name: envoy-gateway
  namespace: juwan
spec:
  type: ClusterIP
  selector:
    app: envoy-gateway
  ports:
    - name: http
      port: 80
      targetPort: 8080
+4
View File
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: monitoring
+82
View File
@@ -0,0 +1,82 @@
# Grafana admin credentials, consumed by the grafana Deployment via
# secretKeyRef (GF_SECURITY_ADMIN_USER / GF_SECURITY_ADMIN_PASSWORD).
apiVersion: v1
kind: Secret
metadata:
  name: grafana-admin
  namespace: monitoring
type: Opaque
data:
  # base64("admin")
  admin-user: YWRtaW4=
  # base64("change-me") — deliberate placeholder; override before real use.
  admin-password: Y2hhbmdlLW1l
---
apiVersion: v1
kind: ConfigMap
metadata:
name: grafana-datasources
namespace: monitoring
data:
datasources.yaml: |
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
access: proxy
url: http://prometheus:9090
isDefault: true
- name: Loki
type: loki
access: proxy
url: http://loki:3100
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: grafana
namespace: monitoring
spec:
replicas: 1
selector:
matchLabels:
app: grafana
template:
metadata:
labels:
app: grafana
spec:
containers:
- name: grafana
image: grafana/grafana:10.4.6
ports:
- name: http
containerPort: 3000
env:
- name: GF_SECURITY_ADMIN_USER
valueFrom:
secretKeyRef:
name: grafana-admin
key: admin-user
- name: GF_SECURITY_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: grafana-admin
key: admin-password
volumeMounts:
- name: datasources
mountPath: /etc/grafana/provisioning/datasources
volumes:
- name: datasources
configMap:
name: grafana-datasources
---
apiVersion: v1
kind: Service
metadata:
name: grafana
namespace: monitoring
spec:
type: ClusterIP
ports:
- name: http
port: 3000
targetPort: http
selector:
app: grafana
+90
View File
@@ -0,0 +1,90 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: loki-config
namespace: monitoring
data:
loki.yaml: |
auth_enabled: false
server:
http_listen_port: 3100
common:
path_prefix: /loki
storage:
filesystem:
chunks_directory: /loki/chunks
rules_directory: /loki/rules
replication_factor: 1
ring:
kvstore:
store: inmemory
schema_config:
configs:
- from: 2024-01-01
store: boltdb-shipper
object_store: filesystem
schema: v12
index:
prefix: index_
period: 24h
storage_config:
boltdb_shipper:
active_index_directory: /loki/index
cache_location: /loki/cache
shared_store: filesystem
ruler:
alertmanager_url: http://localhost:9093
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: loki
namespace: monitoring
spec:
replicas: 1
selector:
matchLabels:
app: loki
template:
metadata:
labels:
app: loki
spec:
containers:
- name: loki
image: grafana/loki:2.9.6
args:
- "-config.file=/etc/loki/loki.yaml"
ports:
- name: http
containerPort: 3100
volumeMounts:
- name: config
mountPath: /etc/loki
- name: data
mountPath: /loki
volumes:
- name: config
configMap:
name: loki-config
- name: data
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: loki
namespace: monitoring
spec:
type: ClusterIP
ports:
- name: http
port: 3100
targetPort: http
selector:
app: loki
+138
View File
@@ -0,0 +1,138 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus
namespace: monitoring
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus
rules:
- apiGroups: [""]
resources:
- nodes
- nodes/metrics
- services
- endpoints
- pods
- namespaces
verbs: ["get", "list", "watch"]
- apiGroups: ["extensions", "apps"]
resources:
- deployments
verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus
subjects:
- kind: ServiceAccount
name: prometheus
namespace: monitoring
---
# Prometheus scrape configuration: self-scrape plus any Service whose
# annotations opt in (prometheus.io/scrape: "true", with optional
# prometheus.io/scheme, prometheus.io/path and prometheus.io/port).
# FIX: the __address__ rewrite regex was `(.+):(?:\d+);(\d+)`, which only
# matches when the discovered address already contains a port and greedily
# eats colons; on non-match the relabel is a no-op and the port annotation is
# silently ignored. Replaced with the canonical `([^:]+)(?::\d+)?;(\d+)`,
# which makes the existing port optional.
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-config
  namespace: monitoring
data:
  prometheus.yml: |
    global:
      scrape_interval: 15s
      evaluation_interval: 15s
    scrape_configs:
      - job_name: "prometheus"
        static_configs:
          - targets: ["localhost:9090"]
      - job_name: "kubernetes-annotated-endpoints"
        kubernetes_sd_configs:
          - role: endpoints
        relabel_configs:
          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
            action: keep
            regex: "true"
          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
            action: replace
            target_label: __scheme__
            regex: (https?)
          - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
            action: replace
            target_label: __metrics_path__
            regex: (.+)
          - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
            action: replace
            target_label: __address__
            regex: ([^:]+)(?::\d+)?;(\d+)
            replacement: $1:$2
          - source_labels: [__meta_kubernetes_namespace]
            action: replace
            target_label: namespace
          - source_labels: [__meta_kubernetes_service_name]
            action: replace
            target_label: service
          - source_labels: [__meta_kubernetes_endpoint_port_name]
            action: replace
            target_label: port
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: prometheus
namespace: monitoring
spec:
replicas: 1
selector:
matchLabels:
app: prometheus
template:
metadata:
labels:
app: prometheus
spec:
serviceAccountName: prometheus
containers:
- name: prometheus
image: prom/prometheus:v2.53.0
args:
- "--config.file=/etc/prometheus/prometheus.yml"
- "--storage.tsdb.path=/prometheus"
- "--storage.tsdb.retention.time=15d"
- "--web.enable-lifecycle"
ports:
- name: http
containerPort: 9090
volumeMounts:
- name: config
mountPath: /etc/prometheus
- name: data
mountPath: /prometheus
volumes:
- name: config
configMap:
name: prometheus-config
- name: data
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: prometheus
namespace: monitoring
spec:
type: ClusterIP
ports:
- name: http
port: 9090
targetPort: http
selector:
app: prometheus
+108
View File
@@ -0,0 +1,108 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: promtail
namespace: monitoring
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: promtail
rules:
- apiGroups: [""]
resources:
- pods
- namespaces
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: promtail
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: promtail
subjects:
- kind: ServiceAccount
name: promtail
namespace: monitoring
---
apiVersion: v1
kind: ConfigMap
metadata:
name: promtail-config
namespace: monitoring
data:
promtail.yaml: |
server:
http_listen_port: 9080
grpc_listen_port: 0
positions:
filename: /run/promtail/positions.yaml
clients:
- url: http://loki:3100/loki/api/v1/push
scrape_configs:
- job_name: kubernetes-pods
kubernetes_sd_configs:
- role: pod
relabel_configs:
- action: replace
source_labels: [__meta_kubernetes_pod_node_name]
target_label: node
- action: replace
source_labels: [__meta_kubernetes_namespace]
target_label: namespace
- action: replace
source_labels: [__meta_kubernetes_pod_name]
target_label: pod
- action: replace
source_labels: [__meta_kubernetes_pod_container_name]
target_label: container
- action: replace
source_labels: [__meta_kubernetes_pod_uid]
target_label: __path__
replacement: /var/log/pods/*$1/*/*.log
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: promtail
namespace: monitoring
spec:
selector:
matchLabels:
app: promtail
template:
metadata:
labels:
app: promtail
spec:
serviceAccountName: promtail
tolerations:
- operator: "Exists"
containers:
- name: promtail
image: grafana/promtail:2.9.6
args:
- "-config.file=/etc/promtail/promtail.yaml"
volumeMounts:
- name: config
mountPath: /etc/promtail
- name: positions
mountPath: /run/promtail
- name: varlog
mountPath: /var/log
readOnly: true
volumes:
- name: config
configMap:
name: promtail-config
- name: positions
emptyDir: {}
- name: varlog
hostPath:
path: /var/log
@@ -17,7 +17,7 @@ spec:
app: user-rpc
spec:
serviceAccountName: find-endpoints
initContainers:
initContainers: # 等待数据库就绪的 Init Container 不影响资源使用但是影响调度策略(也可以忽略不计)
- name: wait-for-db
image: busybox:1.36
command:
@@ -31,12 +31,20 @@ spec:
image: user-rpc:v1
ports:
- containerPort: 9001
- containerPort: 4001
env:
- name: DB_URI
valueFrom:
secretKeyRef:
name: user-db-app
key: uri
- name: REDIS_HOST
value: "user-redis.juwan:6379"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: user-redis
key: password
readinessProbe:
tcpSocket:
port: 9001
@@ -68,10 +76,18 @@ kind: Service
metadata:
name: user-rpc-svc
namespace: juwan
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "4001"
prometheus.io/path: "/metrics"
spec:
ports:
- port: 9001
- name: rpc
port: 9001
targetPort: 9001
- name: metrics
port: 4001
targetPort: 4001
selector:
app: user-rpc
@@ -120,8 +136,40 @@ spec:
target:
type: Utilization
averageUtilization: 80
---
# Redis Cluster
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisCluster
metadata:
name: user-redis
namespace: juwan
spec:
clusterSize: 3
kubernetesConfig:
image: quay.io/opstree/redis:v7.0.12
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
redisSecret:
name: user-redis
key: password
redisExporter:
enabled: true
image: quay.io/opstree/redis-exporter:latest
imagePullPolicy: Always
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
storage:
size: 1Gi
---
# PostgreSQL 集群
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
@@ -129,11 +177,6 @@ metadata:
name: user-db
spec:
instances: 3
postInitSQLRefs:
configMapRefs:
- name: db-dx-init-script
key: init-extensions-sql
optional: false
backup:
barmanObjectStore:
destinationPath: s3://juwan-dev-pg-backups-zj/pg-data/
+3
View File
@@ -0,0 +1,3 @@
# Create the `user-redis` Secret referenced by the RedisReplication /
# RedisSentinel CRs and by the user-rpc Deployment (key: password).
# The password is 12 random bytes, base64-encoded by openssl; capture it
# elsewhere if you need to connect manually, it is not printed.
kubectl create secret generic user-redis \
  --from-literal=password=$(openssl rand -base64 12) \
  --namespace juwan
-26
View File
@@ -1,26 +0,0 @@
syntax = "v1"
info (
author: "Asadz"
date: "2024-06-19"
version: "1.0"
)
type (
UserInfoReq {
UserId int64 `json:"userId"`
}
UserInfoResp {
UserId int64 `json:"userId"`
Nickname string `json:"nickname"`
}
)
service user-api {
@doc (
summary: "Get user infomaction by user id"
)
@handler userInfo
post /user/info (UserInfoReq) returns (UserInfoResp)
}
+124
View File
@@ -0,0 +1,124 @@
syntax = "v1"
info (
author: "Asadz"
date: "2024-06-19"
version: "1.0"
)
type (
RegisterReq {
Username string `json:"username" binding:"required,min=3,max=50"`
Password string `json:"password" binding:"required,min=6,max=128"`
Email string `json:"email,omitempty" binding:"omitempty,email"`
Phone string `json:"phone,omitempty" binding:"omitempty,len=11"`
}
RegisterResp {
UserId int64 `json:"userId"`
Username string `json:"username"`
Email string `json:"email"`
Message string `json:"message"`
}
LoginReq {
Username string `json:"username" binding:"required"`
Password string `json:"password" binding:"required"`
}
LoginResp {
UserId int64 `json:"userId"`
Username string `json:"username"`
Email string `json:"email"`
Token string `json:"token"`
Expires int64 `json:"expires"`
}
GetUserInfoReq {
UserId int64 `path:"userId" binding:"required,gt=0"`
}
UserInfo {
UserId int64 `json:"userId"`
Username string `json:"username"`
Email string `json:"email"`
Phone string `json:"phone"`
Avatar string `json:"avatar"`
Status int `json:"status"`
CreateAt int64 `json:"createAt"`
UpdateAt int64 `json:"updateAt"`
}
UpdateUserInfoReq {
UserId int64 `path:"userId" binding:"required,gt=0"`
Email string `json:"email" binding:"omitempty,email"`
Phone string `json:"phone" binding:"omitempty,len=11"`
Avatar string `json:"avatar" binding:"omitempty,url"`
}
UpdateUserInfoResp {
UserId int64 `json:"userId"`
Message string `json:"message"`
}
UpdatePasswordReq {
UserId int64 `path:"userId" binding:"required,gt=0"`
OldPassword string `json:"oldPassword" binding:"required"`
NewPassword string `json:"newPassword" binding:"required,min=6,max=128"`
}
UpdatePasswordResp {
Message string `json:"message"`
}
LogoutReq {
UserId int64 `path:"userId" binding:"required,gt=0"`
}
LogoutResp {
Message string `json:"message"`
}
ErrorResp {
Code int `json:"code"`
Message string `json:"message"`
}
)
@server (
group: user
prefix: /api/users
middleware: Logger
)
service user-api {
@doc (
summary: "用户注册接口"
description: "通过用户名、密码、邮箱、电话号码注册新用户账户"
)
@handler Register
post /register (RegisterReq) returns (RegisterResp)
@doc (
summary: "用户登录接口"
description: "使用用户名和密码进行登录,返回访问令牌和用户信息"
)
@handler Login
post /login (LoginReq) returns (LoginResp)
@doc (
summary: "获取用户信息"
description: "根据用户ID获取用户的详细信息,包含个人资料和账户状态"
)
@handler GetUserInfo
get /:userId (GetUserInfoReq) returns (UserInfo)
@doc (
summary: "修改用户信息"
description: "修改用户的邮箱、电话号码、头像等信息"
)
@handler UpdateUserInfo
put /:userId (UpdateUserInfoReq) returns (UpdateUserInfoResp)
@doc (
summary: "修改用户密码"
description: "验证旧密码后修改为新密码,需要提供原密码"
)
@handler UpdatePassword
put /:userId/password (UpdatePasswordReq) returns (UpdatePasswordResp)
@doc (
summary: "用户登出"
description: "清除用户的登录会话,使用户令牌失效"
)
@handler Logout
post /:userId/logout (LogoutReq) returns (LogoutResp)
}
-18
View File
@@ -1,18 +0,0 @@
syntax = "proto3";
option go_package = "./pb";
package pb;
message GetUserInfoReq {
int64 id = 1;
}
message GetUserInfoResp {
int64 id = 1;
string nickname = 2;
}
service Usercenter {
rpc GetUserInfo(GetUserInfoReq) returns (GetUserInfoResp);
}
+116
View File
@@ -0,0 +1,116 @@
// usercenter RPC contract: CRUD + search over the `users` table.
syntax = "proto3";
option go_package ="./pb";
package pb;
// ------------------------------------
// Messages
// ------------------------------------
//--------------------------------users--------------------------------
// Users mirrors one row of the users table.
// NOTE(review): `passwd` (presumably the stored password hash) rides along in
// every Users payload, so GetUsersById / SearchUsers / GetUserByUsername all
// expose it to RPC callers. GetUserByUsername may need it for login checks,
// but consider stripping it from the other read paths — TODO confirm.
message Users {
  string userId = 1; //userId
  string username = 2; //username
  string passwd = 3; //passwd
  string nickname = 4; //nickname
  string phone = 5; //phone
  int64 roleType = 6; //roleType
  bool isVerified = 7; //isVerified
  bool state = 8; //state
  int64 createdAt = 9; //createdAt (unix timestamp)
  int64 updatedAt = 10; //updatedAt (unix timestamp)
  int64 deletedAt = 11; //deletedAt (unix timestamp; soft delete marker)
}
// AddUsersReq carries the full row to insert.
message AddUsersReq {
  string userId = 1; //userId
  string username = 2; //username
  string passwd = 3; //passwd
  string nickname = 4; //nickname
  string phone = 5; //phone
  int64 roleType = 6; //roleType
  bool isVerified = 7; //isVerified
  bool state = 8; //state
  int64 createdAt = 9; //createdAt
  int64 updatedAt = 10; //updatedAt
  int64 deletedAt = 11; //deletedAt
}
// Empty response: success is conveyed by the absence of an RPC error.
message AddUsersResp {
}
// UpdateUsersReq carries the full replacement row, keyed by userId.
message UpdateUsersReq {
  string userId = 1; //userId
  string username = 2; //username
  string passwd = 3; //passwd
  string nickname = 4; //nickname
  string phone = 5; //phone
  int64 roleType = 6; //roleType
  bool isVerified = 7; //isVerified
  bool state = 8; //state
  int64 createdAt = 9; //createdAt
  int64 updatedAt = 10; //updatedAt
  int64 deletedAt = 11; //deletedAt
}
message UpdateUsersResp {
}
message DelUsersReq {
  int64 id = 1; //id
}
message DelUsersResp {
}
message GetUsersByIdReq {
  int64 id = 1; //id
}
message GetUsersByIdResp {
  Users users = 1; //users
}
// SearchUsersReq: paged search; non-zero/non-empty fields act as filters.
message SearchUsersReq {
  int64 page = 1; //page
  int64 limit = 2; //limit
  string userId = 3; //userId
  string username = 4; //username
  string passwd = 5; //passwd
  string nickname = 6; //nickname
  string phone = 7; //phone
  int64 roleType = 8; //roleType
  bool isVerified = 9; //isVerified
  bool state = 10; //state
  int64 createdAt = 11; //createdAt
  int64 updatedAt = 12; //updatedAt
  int64 deletedAt = 13; //deletedAt
}
message SearchUsersResp {
  repeated Users users = 1; //users
}
message GetUserByUsernameReq {
  string username = 1; //username
}
message GetUserByUsernameResp {
  Users users = 1; //users
}
// ------------------------------------
// Rpc Func
// ------------------------------------
service usercenter{
  //-----------------------users-----------------------
  rpc AddUsers(AddUsersReq) returns (AddUsersResp);
  rpc UpdateUsers(UpdateUsersReq) returns (UpdateUsersResp);
  rpc DelUsers(DelUsersReq) returns (DelUsersResp);
  rpc GetUsersById(GetUsersByIdReq) returns (GetUsersByIdResp);
  rpc GetUserByUsername(GetUserByUsernameReq) returns (GetUserByUsernameResp);
  rpc SearchUsers(SearchUsersReq) returns (SearchUsersResp);
}
+9 -4
View File
@@ -1,13 +1,18 @@
create extension if not exists "uuid-ossp";
create extension if not exists "pg_trgm";
CREATE TABLE users (
user_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
create table users
(
user_id uuid primary key default uuid_generate_v4(),
username VARCHAR(50) UNIQUE NOT NULL,
passwd VARCHAR(255) NOT NULL,
nikename VARCHAR(50) NOT NULL,
nickname VARCHAR(50) NOT NULL,
phone VARCHAR(20) UNIQUE NOT NULL,
role_type SMALLINT NOT NULL, -- 1:玩家, 2:打手, 3:店长
is_verified BOOLEAN DEFAULT false,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
state BOOLEAN NOT NULL DEFAULT true,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at timestamp with time zone default current_timestamp,
deleted_at timestamp with time zone
);
+95
View File
@@ -0,0 +1,95 @@
# Envoy Gateway Configuration
This document explains how the Envoy gateway is configured and how to modify it.
## Files
- envoy.yaml: ConfigMap + Deployment + Service for Envoy
## Current Behavior
- Envoy listens on port 8080 in the Pod and exposes port 80 via a ClusterIP Service.
- All HTTP traffic is routed to user-api only.
- gRPC is not exposed by this gateway.
## Routing
In envoy.yaml, routes are defined under:
static_resources -> listeners -> http_connection_manager -> route_config -> virtual_hosts
The current routing rules are:
- All requests (prefix: "/") -> cluster: user-api
To add a new HTTP service, add a new route above the default route and define a new cluster.
Example: route /order to order-api-svc:8899
1) Add a route match:
- match:
prefix: "/order"
route:
cluster: order-api
2) Add a cluster:
- name: order-api
connect_timeout: 2s
type: STRICT_DNS
lb_policy: ROUND_ROBIN
load_assignment:
cluster_name: order-api
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: order-api-svc.juwan.svc.cluster.local
port_value: 8899
## CSRF Protection
Envoy uses a Lua filter for CSRF validation:
- Safe methods (GET/HEAD/OPTIONS):
- If csrf_token cookie is missing, Envoy generates one and sets it in the response.
- Unsafe methods (POST/PUT/PATCH/DELETE, etc):
- Requires BOTH:
- header: X-CSRF-Token
- cookie: csrf_token
- Values must match, otherwise Envoy returns 403.
If you want a different cookie name or header name, update these in the Lua code:
- Header: x-csrf-token
- Cookie: csrf_token
To relax or tighten rules, edit the functions:
- is_safe(method)
- envoy_on_request(request_handle)
## Cookie Attributes
Current Set-Cookie:
csrf_token=<value>; Path=/; SameSite=Strict
To add Secure or HttpOnly, update the string in envoy_on_response.
## Deployment
Apply or update:
kubectl apply -f deploy/k8s/envoy/envoy.yaml
## Common Changes
- Change listening port:
- Update listener port_value and Service targetPort/port.
- Change service namespace:
- Update cluster DNS addresses (e.g. service.ns.svc.cluster.local).
- Add more services:
- Add route + add cluster, as shown above.
+385
View File
@@ -0,0 +1,385 @@
# Kubernetes 部署问题排查与解决记录
**日期**: 2026年2月23日
**问题**: user-rpc 和 Redis 部署失败
**状态**: 已诊断,解决中
---
## 📋 问题描述
执行 `kubectl apply -f test.yaml` 后,资源虽然创建成功,但实际的应用 pods 并未正常运行:
```
kubectl apply -f ..\test.yaml
✓ deployment.apps/user-rpc created
✓ service/user-rpc-svc created
✓ horizontalpodautoscaler.autoscaling/user-rpc-hpa-c created
✓ horizontalpodautoscaler.autoscaling/user-rpc-hpa-m created
✓ redisreplication.redis.redis.opstreelabs.in/user-redis created
✓ redissentinel.redis.redis.opstreelabs.in/user-redis-sentinel created
✓ cluster.postgresql.cnpg.io/user-db created
```
但执行 `kubectl get all` 后,发现:
- ❌ **user-rpc pods 未创建**:Deployment 0/3 replicas ready
- ❌ **Redis pods 未创建**:RedisReplication 资源存在但无 pods
- ✅ user-db pods 正常运行(3/3)
---
## 🔍 排查过程
### 第一步:检查 Deployment 状态
```bash
kubectl describe deployment user-rpc
```
**发现**
```
Conditions:
Type Status Reason
---- ------ ------
Progressing True NewReplicaSetCreated
Available False MinimumReplicasUnavailable
ReplicaFailure True FailedCreate
```
### 第二步:检查 ReplicaSet 详情
```bash
kubectl describe replicaset user-rpc-6bf77fbcd9
```
**发现关键错误**
```
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedCreate 3m53s replicaset-controller Error creating:
pods "user-rpc-6bf77fbcd9-" is forbidden: error looking up service
account default/find-endpoints: serviceaccount "find-endpoints" not found
```
**问题 #1 诊断完成**:❌ **缺失 ServiceAccount "find-endpoints"**
### 第三步:检查现有 ServiceAccounts
```bash
kubectl get serviceaccount
```
**结果**
```
NAME AGE
cluster-example 4d10h
default 13d
redis-operator 9h
user-db 4m9s
```
确认 `find-endpoints` 不存在。
### 第四步:检查 Secrets
```bash
kubectl get secrets
```
**结果**:默认 secrets 都存在,包括:
- ✅ user-db-app
- ✅ user-redis
- ✅ user-db-ca, user-db-replication, user-db-server
### 第五步:检查 Redis 部署
```bash
kubectl get redisreplication
kubectl get pods | grep redis
```
**发现**
- ✅ RedisReplication 资源存在
- ❌ Redis pods **完全没有被创建**
**问题 #2 诊断**:❌ **Redis Operator 未响应 RedisReplication 资源**
---
## 🔧 第一次修复尝试
### 创建缺失的 ServiceAccount
```bash
kubectl create serviceaccount find-endpoints
```
**结果**:✅ ServiceAccount 创建成功
### 重启 Deployment
```bash
kubectl rollout restart deployment user-rpc
```
**等待 5-10 秒后重新检查**
```bash
kubectl get pods -o wide
```
**新的发现**
```
NAME READY STATUS RESTARTS AGE
user-rpc-66f97fbdcc-ws7rc 0/1 ErrImagePull 0 26s
user-rpc-6bf77fbcd9-njm2z 0/1 ErrImagePull 0 29s
user-rpc-6bf77fbcd9-nwjtw 0/1 ImagePullBackOff 0 29s
user-rpc-6bf77fbcd9-wjrf8 0/1 ErrImagePull 0 29s
```
**好消息**:Pods 现在被创建了!(说明 ServiceAccount 问题已解决)
**新问题**:镜像拉取失败
---
## 🐛 根因分析
### 问题 #1:缺失 ServiceAccount ✅ 已解决
**根本原因**test.yaml 的 Deployment manifest 指定了:
```yaml
spec:
template:
spec:
serviceAccountName: find-endpoints # 这个 ServiceAccount 不存在
```
但没有在 test.yaml 中创建 ServiceAccount 资源。
**解决方案**
```bash
kubectl create serviceaccount find-endpoints
```
或在 test.yaml 中添加:
```yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: find-endpoints
namespace: default
```
---
### 问题 #2:镜像拉取失败 ❌ 需要修复
```bash
kubectl describe pod user-rpc-6bf77fbcd9-njm2z
```
**详细错误日志**
```
Events:
Warning Failed 38s kubelet Failed to pull image
"103.236.53.208:4418/library/user-rpc@sha256:76b27d3eb4d5d44e...":
Error response from daemon: Get "https://103.236.53.208:4418/v2/":
context deadline exceeded (Client.Timeout exceeded while awaiting headers)
Warning Failed 23s kubelet Failed to pull image
"103.236.53.208:4418/library/user-rpc@sha256:76b27d3eb4d5d44e...":
http: server gave HTTP response to HTTPS client
```
**根本原因分析**
1. **网络连接失败**`context deadline exceeded` - 无法连接到镜像仓库
2. **协议不匹配**`http: server gave HTTP response to HTTPS client` -
- 地址 `103.236.53.208:4418` 应该是 HTTP 而不是 HTTPS
- Docker daemon 尝试用 HTTPS 连接,但服务器使用 HTTP
**可能原因**
- 镜像仓库地址错误或不可访问
- 镜像仓库需要特定的网络配置
- 仓库服务器离线或配置不当
---
### 问题 #3:Redis 部署失败 ❌ 需要诊断
**现象**
- RedisReplication 和 RedisSentinel CRD 资源创建成功
- 但没有对应的 Redis pods 被创建
- `kubectl get pods | grep redis` 返回空
**可能原因**
1. **Redis Operator 未正常工作**
- Operator pod 可能存在问题
- Operator 未能监听到新的 RedisReplication 资源
2. **CRD 或 API 版本问题**
- manifest 中使用的 API 版本 `v1beta2` 可能不匹配 Operator 版本
3. **资源限制或权限问题**
- Operator 无权限创建 pods
- 集群资源限制阻止了 pod 创建
---
## ✅ 已执行的修复
| # | 问题 | 修复方法 | 状态 |
|---|------|---------|------|
| 1 | 缺失 ServiceAccount | `kubectl create serviceaccount find-endpoints` | ✅ 完成 |
| 2 | 镜像拉取失败 | 需要更新镜像地址或修复网络 | ⏳ 待处理 |
| 3 | Redis pods 未创建 | 需要诊断 Operator 日志 | ⏳ 待诊断 |
---
## 🚀 下一步解决方案
### 优先级 1:修复 user-rpc 镜像拉取
**选项 A:使用本地/内部镜像**
```yaml
# 修改 test.yaml 中的镜像地址
image: localhost:5000/user-rpc:latest # 本地私有仓库
# 或
image: user-rpc:latest # 本地镜像(如果已通过 docker load 导入)
```
**选项 B:修复仓库地址**
```yaml
# 如果 103.236.53.208:4418 确实是正确仓库(注意:镜像引用不能带 http:// 前缀)
image: 103.236.53.208:4418/library/user-rpc:latest
# HTTP 仓库需在各节点的容器运行时中配置 insecure registry,例如 Docker 的
# /etc/docker/daemon.json:{ "insecure-registries": ["103.236.53.208:4418"] }
```
**验证步骤**
```bash
# 检查镜像仓库连接性
curl -v http://103.236.53.208:4418/v2/
```
### 优先级 2:诊断 Redis Operator
```bash
# 查看 Operator 日志
kubectl logs -l app.kubernetes.io/name=redis-operator -f
# 查看 Operator pod
kubectl get pods -A | grep redis-operator
# 查看 RedisReplication 详细信息
kubectl describe redisreplication user-redis
# 检查 Operator 权限(RBAC
kubectl get role,rolebinding,clusterrole,clusterrolebinding | grep redis
```
### 优先级 3:增强 test.yaml
建议在 test.yaml 中添加缺失的资源定义:
```yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: find-endpoints
namespace: default
---
apiVersion: v1
kind: Secret
metadata:
name: registry-credentials
namespace: default
type: kubernetes.io/dockercfg
data:
.dockercfg: <base64-encoded-credentials> # 如果需要私有仓库认证
```
---
## 📊 当前集群状态
### Pods 状态总结
| 应用 | 期望副本 | 实际运行 | 状态 |
|------|---------|---------|------|
| user-db | 3 | 3 | ✅ 正常 |
| user-rpc | 3 | 0 | ❌ 镜像拉取失败 |
| Redis | 3 | 0 | ❌ Operator 未创建 |
| Sentinel | 3 | 0 | ❌ Operator 未创建 |
### Services 状态
```
✅ kubernetes (内置)
✅ user-rpc-svc:9001
✅ user-db-r:5432 (只读副本)
✅ user-db-ro:5432 (只读副本)
✅ user-db-rw:5432 (读写主副本)
```
### HPA 配置
```
✅ user-rpc-hpa-c (CPU 目标: 80%) - 无法工作(pods 未运行)
✅ user-rpc-hpa-m (Memory 目标: 80%) - 无法工作(pods 未运行)
```
---
## 📝 关键命令速查表
```bash
# 查看 Deployment 状态
kubectl describe deployment user-rpc
# 查看 ReplicaSet 错误事件
kubectl describe replicaset user-rpc-6bf77fbcd9
# 查看 Pod 详细错误
kubectl describe pod user-rpc-6bf77fbcd9-njm2z
# 查看 Pod 日志
kubectl logs user-rpc-6bf77fbcd9-njm2z
# 查看所有事件(按时间排序)
kubectl get events --sort-by='.lastTimestamp'
# 查看特定命名空间的所有资源
kubectl get all -n default
# 重新启动 deployment(强制重新创建 pods
kubectl rollout restart deployment user-rpc
# 查看 Operator 日志
kubectl logs -l app.kubernetes.io/name=redis-operator
# 检查 CRD 注册状态
kubectl api-resources | grep redis
```
---
## 🎯 总结
| 问题 | 原因 | 解决状态 |
|------|------|---------|
| **ServiceAccount 缺失** | manifest 中声明但未创建 | ✅ **已解决** |
| **镜像拉取失败** | 仓库地址不可达或协议不匹配 | ⏳ **待处理** |
| **Redis 未部署** | Operator 未响应 CRD | ⏳ **待诊断** |
**建议行动**
1. 确认/修复 user-rpc 镜像地址
2. 诊断 Redis Operator 状态
3. 验证所有依赖的 ServiceAccounts 和 Secrets 是否存在
4. 考虑在 test.yaml 中添加完整的资源定义,避免手工创建
File diff suppressed because it is too large Load Diff
+743
View File
@@ -0,0 +1,743 @@
# Redis Kubernetes Service 详细解析
**问题:** 为什么 Redis 有 8 个 Service,但应用配置中只使用 `user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379`
**日期:** 2026年2月22日
---
## 📋 目录
1. [Service 概览](#service-概览)
2. [Kubernetes Service 基础](#kubernetes-service-基础)
3. [8 个 Service 的详细说明](#8-个-service-的详细说明)
4. [为什么使用哪个 Service](#为什么使用哪个-service)
5. [Service 创建原理](#service-创建原理)
6. [网络流量路由](#网络流量路由)
7. [故障排查](#故障排查)
---
## 📊 Service 概览
### 当前 Redis 的 8 个 Service
```bash
$ kubectl get svc -n juwan | grep redis
NAME TYPE CLUSTER-IP PORTS
user-redis ClusterIP 10.103.91.84 6379/TCP,9121/TCP 33m
user-redis-additional ClusterIP 10.107.228.48 6379/TCP 33m
user-redis-headless ClusterIP None 6379/TCP 33m
user-redis-master ClusterIP 10.97.120.76 6379/TCP 33m
user-redis-replica ClusterIP 10.100.213.103 6379/TCP 33m
user-redis-sentinel-sentinel ClusterIP 10.105.28.231 26379/TCP 32m
user-redis-sentinel-sentinel-additional ClusterIP 10.97.111.42 26379/TCP 32m
user-redis-sentinel-sentinel-headless ClusterIP None 26379/TCP 32m
```
### 按功能分类
| 分类 | Service 名称 | 作用 |
|-----|-------------|------|
| **Redis 数据层** | user-redis | 通用入口 |
| | user-redis-additional | 备用入口 |
| | user-redis-master | 主节点专用 |
| | user-redis-replica | 从节点专用 |
| | user-redis-headless | Pod 间通信 |
| **Sentinel 监控层** | user-redis-sentinel-sentinel | Sentinel 入口 ⭐ |
| | user-redis-sentinel-sentinel-additional | 备用入口 |
| | user-redis-sentinel-sentinel-headless | Sentinel 间通信 |
---
## 🔷 Kubernetes Service 基础
### Service 的作用
**Kubernetes 中的 Service 是什么?**
```
┌─────────────────────────────────────────────────┐
│ Kubernetes Cluster │
│ │
│ Service (虚拟 IP + DNS) │
│ ↓ │
│ Endpoints (实际 Pod IP 列表) │
│ ├─ 10.244.0.10:6379 (Pod 1) │
│ ├─ 10.244.1.20:6379 (Pod 2) │
│ └─ 10.244.2.30:6379 (Pod 3) │
│ │
│ 客户端 ──→ Service IP (稳定) ──→ Pod IP (变化) │
└─────────────────────────────────────────────────┘
```
### Service 的三种类型
| 类型 | CLUSTER-IP | 用途 | 示例 |
|-----|-----------|------|------|
| **ClusterIP** | ✅ 有 | 集群内访问 | 10.103.91.84 |
| **ClusterIP<br/>(Headless)** | ❌ None | Pod 间直接通信 | None |
| **NodePort** | ✅ 有 | 集群外访问 | 10.103.91.84 |
---
## 🔍 8 个 Service 的详细说明
### 第一组:Redis 数据层 Service(端口 6379)
#### 1️⃣ user-redis(ClusterIP)
**基本信息:**
```yaml
名称: user-redis
类型: ClusterIP (有负载均衡)
Cluster IP: 10.103.91.84
端口: 6379/TCP, 9121/TCP
DNS: user-redis.juwan.svc.cluster.local
```
**Endpoints 信息:**
```bash
$ kubectl get endpoints user-redis -n juwan
NAME ENDPOINTS
user-redis 10.244.0.10:6379,10.244.1.20:6379,10.244.2.30:6379
```
**负载均衡机制:**
```
客户端请求 ──→ Service IP (10.103.91.84)
kube-proxy (iptables/ipvs)
随机选择一个 Pod
├─ 10.244.0.10 (redis-0)
├─ 10.244.1.20 (redis-1) ← 可能
└─ 10.244.2.30 (redis-2)
```
**特点:**
- ✅ 对所有 Pod 轮询负载均衡
- ✅ 包含 Redis 数据服务(6379)和 Exporter9121
- ⚠️ 可能把写请求轮询到从节点导致失败
**适用场景:**
- 监控抓取(Prometheus 从 9121 端口抓指标)
- 不关心读写分离的简单查询
**为什么有 2 个端口?**
```
6379: Redis 数据服务
9121: Prometheus Exporter 监控端口
└─ 暴露 Redis 性能指标给 Prometheus
(redis_up, redis_memory_used, etc.)
```
**不用这个的原因:**
```
❌ 如果直接使用 user-redis 进行读写:
├─ 写请求可能被路由到从节点 (error)
├─ 无法进行故障自动转移
└─ 依赖于手动更新配置
```
---
#### 2️⃣ user-redis-additional(ClusterIP)
**基本信息:**
```yaml
名称: user-redis-additional
类型: ClusterIP (有负载均衡)
Cluster IP: 10.107.228.48
端口: 6379/TCP
Endpoints: 同 user-redis
```
**作用:**
- 功能完全同 `user-redis`
- 提供额外的访问入口
- 用于多租户/网络隔离场景
**为什么有这个?**
```
场景:某些网络策略可能只允许访问特定 Service
└─ 额外的 Service 提供备用入口
```
**不常用的原因:**
- 大多数场景用 `user-redis` 就足够
- `user-redis-additional` 是备用
---
#### 3️⃣ user-redis-headless(ClusterIP: None)
**基本信息:**
```yaml
名称: user-redis-headless
类型: ClusterIP (Headless Service)
Cluster IP: None ← 关键:无虚拟 IP
端口: 6379/TCP
DNS: user-redis-headless.juwan.svc.cluster.local
```
**特殊之处:无虚拟 IP**
```bash
# 正常 Service 查询返回虚拟 IP
$ nslookup user-redis.juwan.svc.cluster.local
Name: user-redis.juwan.svc.cluster.local
Address: 10.103.91.84 ← 虚拟 IP
# Headless Service 查询返回所有 Pod IP
$ nslookup user-redis-headless.juwan.svc.cluster.local
Name: user-redis-headless.juwan.svc.cluster.local
Address: 10.244.0.10 ← Pod 1 实际 IP
Address: 10.244.1.20 ← Pod 2 实际 IP
Address: 10.244.2.30 ← Pod 3 实际 IP
```
**使用场景:**
```
┌─────────────────────────────────────────────────┐
│ StatefulSet (Redis Cluster/Replication) │
│ │
│ redis-0 (主) redis-1 (从) redis-2 (从) │
│ ↓ ↓ ↓ │
│ 10.244.0.10 10.244.1.20 10.244.2.30 │
│ ↑ │
│ 需要直接连接到特定 Pod: │
│ redis-0.user-redis-headless (连接主节点) │
│ redis-1.user-redis-headless (连接从节点) │
└─────────────────────────────────────────────────┘
```
**谁在使用?**
- Redis 主从复制:从节点需要连接到已知的主节点
- Sentinel 监控:需要直接访问特定 Redis 实例
- Redis Operator 内部使用
**为什么应用不用这个?**
```
❌ Pod DNS 只能在 Pod 内使用
└─ 外部应用不知道 Pod 的具体 DNS 名称
✅ 用虚拟 Service IP 的优势
└─ 无需关心底层 Pod 变化
```
---
#### 4️⃣ user-redis-master(ClusterIP)
**基本信息:**
```yaml
名称: user-redis-master
类型: ClusterIP
Cluster IP: 10.97.120.76
端口: 6379/TCP
Endpoints: 10.244.0.10:6379 (只有 1 个 Pod)
DNS: user-redis-master.juwan.svc.cluster.local
```
**特点:只指向主节点**
```bash
$ kubectl get endpoints user-redis-master -n juwan
NAME ENDPOINTS
user-redis-master 10.244.0.10:6379 ← 仅主节点
```
**对比所有 Endpoints**
```
user-redis-master: 10.244.0.10 (主)
user-redis-replica: 10.244.1.20, 10.244.2.30 (从)
user-redis: 所有 Pod
```
**为什么分开?**
```
┌─────────────────────────────────────────┐
│ Redis 主从架构 │
│ │
│ Redis Master (10.244.0.10) │
│ ├─ 处理所有写操作 │
│   └─ 复制数据给 Slave │
│ │
│ Redis Slave 1 (10.244.1.20) │
│ └─ 处理只读操作 │
│ │
│ Redis Slave 2 (10.244.2.30) │
│ └─ 处理只读操作 │
└─────────────────────────────────────────┘
请求分类:
┌───────────────────────┐
│ SET key value │ ──→ user-redis-master (10.97.120.76)
│ HSET user:1 name john │
└───────────────────────┘
┌───────────────────────┐
│ GET key │ ──→ user-redis-replica (10.100.213.103)
│ HGET user:1 name │
└───────────────────────┘
```
**适用场景:**
- ✅ 读写分离架构
- ✅ 优化读性能(从节点处理读)
- ✅ 减轻主节点负担
**为什么应用通常不直接用?**
```
❌ 需要在应用层面区分读写操作
├─ 写操作 → user-redis-master
├─ 只读操作 → user-redis-replica
└─ 代码复杂度高
✅ Sentinel 模式自动处理
└─ 应用无需关心主从区别
```
---
#### 5️⃣ user-redis-replica(ClusterIP)
**基本信息:**
```yaml
名称: user-redis-replica
类型: ClusterIP
Cluster IP: 10.100.213.103
端口: 6379/TCP
Endpoints: 10.244.1.20:6379, 10.244.2.30:6379 (两个从节点)
DNS: user-redis-replica.juwan.svc.cluster.local
```
**特点:只指向从节点,支持负载均衡**
```bash
$ kubectl get endpoints user-redis-replica -n juwan
NAME ENDPOINTS
user-redis-replica 10.244.1.20:6379, 10.244.2.30:6379
```
**读流量分散:**
```
应用发送 GET 请求
user-redis-replica (10.100.213.103)
随机选择一个从节点
├─ 10.244.1.20 (redis-1) ← 可能
└─ 10.244.2.30 (redis-2) ← 可能
```
**适用场景:**
- 除了 Sentinel 模式外的读优化
- 需要手动管理读写分离
---
### 第二组:Sentinel 监控层 Service(端口 26379)
#### 6️⃣ user-redis-sentinel-sentinel(ClusterIP)⭐⭐⭐
**基本信息:**
```yaml
名称: user-redis-sentinel-sentinel
类型: ClusterIP
Cluster IP: 10.105.28.231
端口: 26379/TCP
Endpoints: 10.244.0.50:26379, 10.244.1.70:26379, 10.244.2.90:26379
(3 个 Sentinel 实例)
DNS: user-redis-sentinel-sentinel.juwan.svc.cluster.local
```
**为什么应用使用这个?**
```
应用程序配置:
┌──────────────────────────────────────────────┐
│ Redis: │
│ Host: user-redis-sentinel-sentinel │
│ Port: 26379 │
│ Type: sentinel │
│ MasterName: mymaster │
└──────────────────────────────────────────────┘
连接流程:
┌─────────────────────────────────────────────┐
│ 应用程序 │
└────────────────────┬────────────────────────┘
┌─────────────────────────────────────────────┐
│ user-redis-sentinel-sentinel (26379) │
│ ├─ Sentinel 1: 10.244.0.50:26379 │
│ ├─ Sentinel 2: 10.244.1.70:26379 │
│ └─ Sentinel 3: 10.244.2.90:26379 │
└────────────────────┬────────────────────────┘
应用询问: "mymaster 在哪?"
Sentinel 回答: "在 10.244.0.10:6379"
┌─────────────────────────────────────────────┐
│ Redis Master: 10.244.0.10:6379 │
│ (应用直接连接进行读写) │
└─────────────────────────────────────────────┘
故障转移过程:
Master 故障 → Sentinel 检测 → 提升新主节点
→ 应用下次查询时 → 获得新主节点 IP
→ 自动连接新主节点
```
**为什么这是最佳选择?**
1. **自动故障转移**
```
主节点宕机 (✗) → Sentinel 自动选举新主 → 应用自动连接
```
2. **高可用**
```
Sentinel 集群(3 个) → 任意 1-2 个故障仍可用
```
3. **应用无感知**
```
应用只需配置 MasterName: mymaster
无需关心主从地址变化
```
4. **标准做法**
```
✅ 业界公认的 Redis 高可用方案
✅ 最小化应用改动
✅ 自动化程度最高
```
**为什么不用其他 Service?**
```
❌ user-redis-master/user-redis-replica
└─ 需要应用层区分读写,主从切换需要重启应用
❌ user-redis/user-redis-additional
└─ 没有故障转移能力,故障时应用会报错
✅ user-redis-sentinel-sentinel
└─ 自动发现新主节点,无需重启应用
```
---
#### 7️⃣ user-redis-sentinel-sentinel-additional(ClusterIP)
**说明:** 功能同 `user-redis-sentinel-sentinel`,备用入口
---
#### 8️⃣ user-redis-sentinel-sentinel-headless(ClusterIP: None)
**说明:** 供 Sentinel 内部通信和选举使用
---
## 🎯 为什么使用哪个 Service
### 应用配置选择
#### ⭐⭐⭐ Sentinel 模式(生产推荐)
```yaml
# 应用配置
Redis:
Host: user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
Type: sentinel
MasterName: mymaster
Pass: ${REDIS_PASSWORD}
```
**优势:**
- ✅ 自动故障转移(RTO < 30 秒)
- ✅ 应用无需重启
- ✅ 自动发现新主节点
- ✅ 生产标准做法
---
#### ⭐⭐ 主从分离模式(可选)
```yaml
# 应用配置(需要两个 host)
Redis:
Master:
Host: user-redis-master.juwan.svc.cluster.local:6379
Slave:
Host: user-redis-replica.juwan.svc.cluster.local:6379
```
**适用场景:**
- 读写分离显著
- 对读性能有极高要求
**缺点:**
- 主从故障需手动切换
- 应用层复杂度高
---
#### ❌ 不推荐的做法
```yaml
# ❌ 直接连接单个节点
Redis:
Host: user-redis-0.user-redis-headless.juwan.svc.cluster.local:6379
# 问题:Pod 重启 IP 变化,需要更新配置
# ❌ 连接通用 Service(无故障转移)
Redis:
Host: user-redis.juwan.svc.cluster.local:6379
# 问题:无法自动转移,故障时应用报错
# ❌ 硬编码 Pod IP
Redis:
Host: 10.244.0.10:6379
# 问题:Pod 重启 IP 变化,应用立即不可用
```
---
## 🔌 Service 创建原理
### 为什么会自动创建这么多 Service?
**由 Redis Operator 自动创建:**
```go
// Redis Operator 逻辑(伪代码)
func CreateServicesForRedis(redis *RedisReplication) {
// 数据层 Service
CreateService("user-redis", AllRedisNodes)
CreateService("user-redis-additional", AllRedisNodes)
CreateService("user-redis-master", [MasterNode])
CreateService("user-redis-replica", [SlaveNodes])
CreateHeadlessService("user-redis-headless", AllRedisNodes)
// 监控层 Service
CreateService("user-redis-sentinel-sentinel", AllSentinelNodes)
CreateService("user-redis-sentinel-sentinel-additional", AllSentinelNodes)
CreateHeadlessService("user-redis-sentinel-sentinel-headless", AllSentinelNodes)
}
```
**为什么这样设计?**
| Service | 原因 |
|---------|------|
| 多个 ClusterIP | 不同场景需要不同的 Endpoints 配置 |
| 包含 additional | 网络隔离/多租户支持 |
| 包含 headless | StatefulSet 需要 Pod 间直接通信 |
**类比:**
```
Redis Operator 就像一个完整的产品
└─ 提供多种方式使用 Redis
├─ 简单: user-redis
├─ 高级: user-redis-master/replica
├─ HA: user-redis-sentinel-sentinel
└─ 内部: headless services
```
---
## 🌐 网络流量路由
### 查询 Service 背后的 Pod
**查看 Service Endpoints**
```bash
# 查看 user-redis 关联的 Pod
$ kubectl get endpoints user-redis -n juwan
NAME ENDPOINTS
user-redis 10.244.0.10:6379,10.244.1.20:6379,10.244.2.30:6379
# 查看 user-redis-master 关联的 Pod
$ kubectl get endpoints user-redis-master -n juwan
NAME ENDPOINTS
user-redis-master 10.244.0.10:6379
# 查看 user-redis-replica 关联的 Pod
$ kubectl get endpoints user-redis-replica -n juwan
NAME ENDPOINTS
user-redis-replica 10.244.1.20:6379,10.244.2.30:6379
```
**Pod 和 Service 的映射关系:**
```
Pods (实际运行的实例) Services (虚拟 IP)
└─ redis-0 (主) └─ user-redis (所有)
├─ 10.244.0.10 ├─ 10.103.91.84
└─ :6379
└─ user-redis-master (仅主)
└─ redis-1 (从) ├─ 10.97.120.76
├─ 10.244.1.20
└─ :6379
└─ user-redis-replica (仅从)
└─ redis-2 (从) ├─ 10.100.213.103
├─ 10.244.2.30
└─ :6379
```
**DNS 解析过程:**
```
应用 DNS 查询
└─ user-redis-master.juwan.svc.cluster.local
CoreDNS (Kubernetes DNS)
└─ 查询并返回 Service IP:
├─ 10.97.120.76 (user-redis-master)
├─ 或 10.100.213.103 (user-redis-replica)
├─ 或 10.103.91.84 (user-redis)
└─ 或 Sentinel 的 IP
```
**Sentinel 模式的特殊之处:**
```
应用查询 Sentinel
└─ user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
Sentinel Service (负载均衡到 3 个 Sentinel 节点)
Sentinel 节点 (任选一个)
应用询问: "mymaster 主节点 IP 是什么?"
Sentinel 回答: "10.244.0.10:6379"
应用直接连接 Redis Master: 10.244.0.10:6379
```
---
## 🔧 故障排查
### 问题 1:为什么应用连接失败?
**检查步骤:**
```bash
# 1. 验证 Service 存在
kubectl get svc user-redis-sentinel-sentinel -n juwan
# 2. 验证 Endpoints 不为空
kubectl get endpoints user-redis-sentinel-sentinel -n juwan
# 3. 测试 DNS 解析
kubectl run -it --rm nettest --image=busybox --restart=Never -n juwan -- \
nslookup user-redis-sentinel-sentinel.juwan.svc.cluster.local
# 4. 测试连接性
kubectl run -it --rm nettest --image=busybox --restart=Never -n juwan -- \
nc -zv user-redis-sentinel-sentinel.juwan.svc.cluster.local 26379
# 5. 查看应用日志
kubectl logs -f user-rpc-xxx -n juwan
```
### 问题 2:为什么看不到某个 Service?
```bash
# 确保在正确的命名空间
kubectl get svc -n juwan | grep redis
# 如果 Redis Operator 有问题,Service 可能不会创建
# 查看 Operator 日志
kubectl logs -n default deployment/redis-operator
```
### 问题 3:Service IP 经常变化?
```bash
# Service IP 是稳定的(除非被删除和重建)
# 如果频繁变化,说明 Service 被频繁重建
# 检查 Service 创建事件
kubectl describe svc user-redis-sentinel-sentinel -n juwan
# 检查 Operator 是否有异常
kubectl describe redissentinel user-redis-sentinel -n juwan
```
---
## 📚 总结
### 快速理解
| Service | 用途 | 应用是否使用 |
|---------|------|-----------|
| **user-redis-sentinel-sentinel** | ⭐ Sentinel 高可用 | ✅ **生产推荐** |
| user-redis-master | 直连主节点 | ⚠️ 需要读写分离 |
| user-redis-replica | 直连从节点 | ⚠️ 需要读写分离 |
| user-redis | 通用入口 | ❌ 不推荐(无 HA) |
| headless services | 内部通信 | ❌ 应用不用 |
### 为什么有这么多 Service
**答案:** 为了提供灵活的使用方式
```
Redis Operator 的设计理念:
┌─────────────────────────────────────────┐
│ 提供完整的 Redis 高可用解决方案 │
│ │
│ ├─ 简单使用场景 │
│ │ └─ user-redis (所有节点) │
│ │ │
│ ├─ 高级使用场景 │
│ │ ├─ user-redis-master (写) │
│ │ └─ user-redis-replica (读) │
│ │ │
│ ├─ 生产场景 (推荐) │
│ │ └─ user-redis-sentinel-sentinel │
│ │ │
│ └─ 内部通信 │
│ └─ headless services │
└─────────────────────────────────────────┘
```
### 应用该用哪个?
**一句话:使用 `user-redis-sentinel-sentinel:26379` + Sentinel 模式**
```yaml
# 这是最佳实践
Redis:
Host: user-redis-sentinel-sentinel.juwan.svc.cluster.local:26379
Type: sentinel
MasterName: mymaster
```
**为什么?**
- ✅ 自动故障转移
- ✅ 应用无需重启
- ✅ 无需手工干预
- ✅ 行业标准
---
**文档版本:** 1.0
**创建日期:** 2026年2月22日
**维护者:** DevOps Team
+779
View File
@@ -0,0 +1,779 @@
# Redis Sentinel 部署问题诊断与修复报告
**问题日期:** 2026年2月22日
**命名空间:** juwan
**涉及资源:** user-rpc deployment, RedisSentinel
---
## 📋 目录
1. [问题背景](#问题背景)
2. [问题现象](#问题现象)
3. [诊断过程](#诊断过程)
4. [根因分析](#根因分析)
5. [解决方案](#解决方案)
6. [修复步骤](#修复步骤)
7. [验证结果](#验证结果)
8. [后续建议](#后续建议)
---
## 🎯 问题背景
### 部署目标
部署一个简单的三节点 Redis Sentinel 哨兵集群作为缓存服务,供 user-rpc 服务使用。后续如有需要再扩展为分片集群。
### 初始配置
`deploy/k8s/service/user/user-rpc.yaml` 中配置了:
- user-rpc Deployment3副本)
- user-rpc Service
- HPA(CPU 和内存)
- **RedisSentinel 资源**
- PostgreSQL Cluster
---
## 🔴 问题现象
### 执行的操作
```bash
kubectl apply -f .\deploy\k8s\service\user\user-rpc.yaml
```
### 输出结果
```
deployment.apps/user-rpc configured
service/user-rpc-svc unchanged
horizontalpodautoscaler.autoscaling/user-rpc-hpa-c unchanged
horizontalpodautoscaler.autoscaling/user-rpc-hpa-m unchanged
redissentinel.redis.redis.opstreelabs.in/user-redis unchanged
cluster.postgresql.cnpg.io/user-db unchanged
```
### 观察到的异常
查看命名空间资源:
```bash
kubectl get all -n juwan
```
**发现:**
- ✅ user-api pods 正常运行
- ✅ user-rpc pods 正常运行
- ✅ PostgreSQL clusters 正常运行
- ❌ **没有任何 Redis 相关的 Pod**
- ❌ **没有 Redis Service**
---
## 🔍 诊断过程
### 步骤 1:检查 RedisSentinel 资源状态
**目的:** 确认 RedisSentinel 资源是否被成功创建
**命令:**
```bash
kubectl get redissentinel user-redis -n juwan
```
**输出:**
```
NAME AGE
user-redis 9m56s
```
**分析:**
- ✅ RedisSentinel 资源已创建
- ❌ 但没有创建任何 Pod
- **结论:** Operator 没有按照 RedisSentinel 规格创建实际资源
---
### 步骤 2:查看 RedisSentinel 详细信息
**目的:** 检查资源的详细配置和事件
**命令:**
```bash
kubectl describe redissentinel user-redis -n juwan
```
**关键输出:**
```yaml
API Version: redis.redis.opstreelabs.in/v1beta2
Kind: RedisSentinel
Metadata:
Creation Timestamp: 2026-02-22T12:41:47Z
Finalizers:
redisSentinelFinalizer
Generation: 2
Spec:
Cluster Size: 3
Redis Sentinel Config:
Redis Replication Name: user-redis # ⚠️ 关键配置
Master Group Name: myMaster
Redis Port: 6379
Quorum: 2
Events: <none> # ⚠️ 没有任何事件
```
**分析:**
- 配置中引用了 `redisReplicationName: user-redis`
- 没有任何 Events,说明 Operator 可能在等待某些前置条件
- **需要进一步检查依赖资源**
---
### 步骤 3:检查 Secret 资源
**目的:** 确认 RedisSentinel 所需的密码 Secret 是否存在
**命令:**
```bash
kubectl get secret user-redis -n juwan
```
**输出:**
```
NAME TYPE DATA AGE
user-redis Opaque 1 5h48m
```
**分析:**
- ✅ Secret 存在,排除认证配置问题
---
### 步骤 4:查找 Redis Pods
**目的:** 确认是否有 Redis Pod 使用不同的命名规则
**命令:**
```bash
kubectl get pods -n juwan | Select-String redis
```
**输出:**
```
(空输出 - 没有找到任何 Redis Pod)
```
**分析:**
- ❌ 确认没有任何 Redis Pod 被创建
---
### 步骤 5:检查 StatefulSet
**目的:** Redis 通常使用 StatefulSet 部署,检查是否有相关资源
**命令:**
```bash
kubectl get statefulset -n juwan
```
**输出:**
```
No resources found in juwan namespace.
```
**分析:**
- ❌ 没有 StatefulSet 被创建
- **结论:** Operator 完全没有执行创建动作
---
### 步骤 6:检查 Redis Operator 状态
**目的:** 确认 Redis Operator 本身是否正常运行
**命令:**
```bash
kubectl get pods -A | Select-String redis-operator
```
**输出:**
```
default redis-operator-7dcf4468c9-gfbcm 1/1 Running 0 4h50m
ot-operators redis-operator-69bb645f5b-6kxkv 0/1 ErrImageNeverPull 0 4h48m
```
**分析:**
- ✅ default namespace 中的 Operator 正常运行
- ⚠️ ot-operators namespace 中的 Operator 镜像拉取失败(但不影响我们的场景)
---
### 步骤 7:查看 Operator 日志
**目的:** 从 Operator 日志中寻找线索
**命令:**
```bash
kubectl logs redis-operator-7dcf4468c9-gfbcm -n default --tail=50
```
**关键输出:**
```json
{"level":"info","ts":"2026-02-22T08:01:56Z","msg":"Starting Controller","controller":"redissentinel"}
{"level":"info","ts":"2026-02-22T08:01:56Z","msg":"Starting workers","controller":"redissentinel","worker count":1}
```
**分析:**
- ✅ RedisSentinel Controller 已启动
- ✅ 没有错误日志
- ❌ 但也没有处理 user-redis 资源的日志
- **推测:** Operator 在等待某个依赖资源
---
### 步骤 8:检查 RedisReplication 资源(关键发现)
**目的:** 根据 RedisSentinel 配置中的 `redisReplicationName: user-redis`,检查对应的 RedisReplication 是否存在
**命令:**
```bash
kubectl get redisreplication -n juwan
```
**输出:**
```
No resources found in juwan namespace.
```
**分析:**
- ❌ **RedisReplication 资源不存在!**
- 🔎 **这就是问题的根本原因**
---
## 💡 根因分析
### 问题根源
**RedisSentinel 依赖 RedisReplication,但配置中只创建了 RedisSentinel,没有创建 RedisReplication。**
### Redis Operator 架构理解
在 OpsTree Redis Operator 中,资源之间的关系如下:
```
┌─────────────────────────────────────────┐
│ RedisSentinel (哨兵层) │
│ - 3个 Sentinel 节点 │
│ - 负责监控和自动故障转移 │
│ - 引用: redisReplicationName │
└──────────────┬──────────────────────────┘
│ 监控
┌─────────────────────────────────────────┐
│ RedisReplication (数据层) │
│ - 1个 Master + N个 Replica │
│ - 提供实际的缓存服务 │
│ - 主从复制 │
└─────────────────────────────────────────┘
```
### 错误配置的问题
原始配置直接创建了 RedisSentinel,但:
1. **缺少被监控对象:** Sentinel 需要监控一个 RedisReplication 集群
2. **引用不存在的资源:** `redisReplicationName: user-redis` 指向一个不存在的 RedisReplication
3. **Operator 行为:** Operator 发现依赖的 RedisReplication 不存在,因此不会创建 Sentinel Pod
### 为什么没有错误提示?
- CRD 验证只检查语法和字段类型
- 资源引用关系由 Operator 运行时检查
- Operator 采用了"等待依赖"策略,而不是报错
---
## ✅ 解决方案
### 正确的部署顺序
1. **先创建 RedisReplication**(建立 Redis 主从复制集群)
2. **再创建 RedisSentinel**(监控上述复制集群)
### 配置结构
```yaml
# 第一步:创建 Redis 主从复制(数据层)
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisReplication
metadata:
name: user-redis # Sentinel 将引用这个名称
namespace: juwan
spec:
clusterSize: 3 # 1 Master + 2 Replicas
kubernetesConfig:
image: quay.io/opstree/redis:v7.0.12
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
redisSecret:
name: user-redis
key: password
storage:
volumeClaimTemplate:
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi # 每个 Redis 节点 1GB 存储
---
# 第二步:创建 Sentinel 监控(监控层)
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisSentinel
metadata:
name: user-redis-sentinel # 使用不同的名称避免混淆
namespace: juwan
spec:
clusterSize: 3 # 3个 Sentinel 节点(推荐奇数)
kubernetesConfig:
image: quay.io/opstree/redis-sentinel:v7.0.12 # 使用 Sentinel 专用镜像
redisSentinelConfig:
redisReplicationName: user-redis # 引用上面的 RedisReplication
masterGroupName: mymaster
quorum: "2" # 需要 2 个 Sentinel 同意才能进行故障转移
```
---
## 🔧 修复步骤
### 步骤 1:删除错误的 RedisSentinel 资源
**命令:**
```bash
kubectl delete redissentinel user-redis -n juwan
```
**输出:**
```
redissentinel.redis.redis.opstreelabs.in "user-redis" deleted
```
**说明:** 删除仅创建了 CRD 实例但未创建实际 Pod 的资源
---
### 步骤 2:更新配置文件
修改 `deploy/k8s/service/user/user-rpc.yaml`,将单独的 RedisSentinel 替换为:
1. RedisReplication(数据层)
2. RedisSentinel(监控层)
**变更内容:**
- 添加 `RedisReplication` 资源定义
- 添加 `storage.volumeClaimTemplate` 配置
- 修改 RedisSentinel 的 `metadata.name``user-redis-sentinel`
- 使用正确的 Sentinel 镜像:`quay.io/opstree/redis-sentinel:v7.0.12`
- 完善 Sentinel 配置参数
---
### 步骤 3:应用更新后的配置
**命令:**
```bash
kubectl apply -f .\deploy\k8s\service\user\user-rpc.yaml
```
**输出:**
```
deployment.apps/user-rpc configured
service/user-rpc-svc unchanged
horizontalpodautoscaler.autoscaling/user-rpc-hpa-c unchanged
horizontalpodautoscaler.autoscaling/user-rpc-hpa-m unchanged
redisreplication.redis.redis.opstreelabs.in/user-redis created ✅
redissentinel.redis.redis.opstreelabs.in/user-redis-sentinel created ✅
cluster.postgresql.cnpg.io/user-db unchanged
```
**分析:**
- ✅ RedisReplication 成功创建
- ✅ RedisSentinel 成功创建
- 🎯 两个资源都是新创建(created),符合预期
---
## ✅ 验证结果
### 验证 1:检查 Pod 创建情况(等待 30 秒)
**命令:**
```bash
kubectl get statefulset,pods -n juwan | Select-String -Pattern "user-redis|NAME"
```
**输出:**
```
NAME READY AGE
statefulset.apps/user-redis 3/3 81s ✅
statefulset.apps/user-redis-sentinel-sentinel 3/3 24s ✅
NAME READY STATUS RESTARTS AGE
pod/user-redis-0 2/2 Running 0 80s ✅
pod/user-redis-1 2/2 Running 0 52s ✅
pod/user-redis-2 2/2 Running 0 47s ✅
pod/user-redis-sentinel-sentinel-0 1/1 Running 0 24s ✅
pod/user-redis-sentinel-sentinel-1 1/1 Running 0 8s ✅
pod/user-redis-sentinel-sentinel-2 1/1 Running 0 5s ✅
```
**分析:**
- ✅ **RedisReplication** 创建了 3 个 Pod(user-redis-0/1/2)
- 每个 Pod 有 2 个容器(2/2):Redis + Exporter
- 所有 Pod 处于 Running 状态
- ✅ **RedisSentinel** 创建了 3 个 Pod(user-redis-sentinel-sentinel-0/1/2)
- 每个 Pod 有 1 个容器(1/1):Sentinel
- 所有 Pod 处于 Running 状态
- ✅ 创建了 2 个 StatefulSet(READY 状态为 3/3)
---
### 验证 2:检查 Service 资源
**命令:**
```bash
kubectl get svc -n juwan | Select-String -Pattern "redis|NAME"
```
**输出:**
```
NAME TYPE CLUSTER-IP PORT(S) AGE
user-redis ClusterIP 10.103.91.84 6379/TCP,9121/TCP 95s ✅
user-redis-additional ClusterIP 10.107.228.48 6379/TCP 95s
user-redis-headless ClusterIP None 6379/TCP 95s ✅
user-redis-master ClusterIP 10.97.120.76 6379/TCP 95s ✅
user-redis-replica ClusterIP 10.100.213.103 6379/TCP 95s ✅
user-redis-sentinel-sentinel ClusterIP 10.105.28.231 26379/TCP 40s ✅
user-redis-sentinel-sentinel-additional ClusterIP 10.97.111.42 26379/TCP 39s
user-redis-sentinel-sentinel-headless ClusterIP None 26379/TCP 41s
```
**Service 功能说明:**
#### Redis 数据层 Service(端口 6379)
- **user-redis-master**: 主节点服务,用于写操作
- **user-redis-replica**: 从节点服务,用于读操作
- **user-redis**: 通用访问入口(负载均衡到所有节点)
- **user-redis-headless**: 无头服务,用于 StatefulSet Pod 间通信
- **user-redis-additional**: 额外的访问入口
#### Sentinel 监控层 Service(端口 26379)
- **user-redis-sentinel-sentinel**: Sentinel 访问入口
- **user-redis-sentinel-sentinel-headless**: Sentinel 节点间通信
- **user-redis-sentinel-sentinel-additional**: 额外的 Sentinel 访问入口
---
### 验证 3:检查完整的集群状态
**命令:**
```bash
kubectl get all -n juwan
```
**最终状态统计:**
| 资源类型 | 名称 | 数量 | 状态 |
|---------|------|------|------|
| **Deployment** | user-api | 3/3 | ✅ Running |
| **Deployment** | user-rpc | 3/3 | ✅ Running |
| **StatefulSet** | cluster-example (PostgreSQL) | 3/3 | ✅ Running |
| **StatefulSet** | user-db (PostgreSQL) | 3/3 | ✅ Running |
| **StatefulSet** | user-redis (Redis 数据) | 3/3 | ✅ Running |
| **StatefulSet** | user-redis-sentinel-sentinel | 3/3 | ✅ Running |
**Pod 总计:** 18 个(全部 Running)
**Service 总计:** 13 个
**HPA 总计:** 6 个
---
## 📊 架构图
### 部署后的 Redis 架构
```
┌────────────────────────────────────────────────────────────┐
│ 应用层 (user-rpc) │
│ │
│ [需要添加 Redis 连接配置] │
└──────────┬─────────────────────────────┬───────────────────┘
│ │
│ 写操作 │ 读操作
↓ ↓
┌─────────────┐ ┌─────────────┐
│ user-redis- │ │ user-redis- │
│ master │ │ replica │
│ Service │ │ Service │
└─────────────┘ └─────────────┘
│ │
└──────────┬──────────────────┘
┌──────────────────────────────────────────┐
│ RedisReplication (数据层) │
│ │
│ ┌──────────┐ ┌──────────┐ ┌───────┐ │
│ │ Master │→ │ Replica │→ │Replica│ │
│ │ redis-0 │ │ redis-1 │ │redis-2│ │
│ └──────────┘ └──────────┘ └───────┘ │
└──────────────────────────────────────────┘
│ 监控 & 故障转移
┌──────────────────────────────────────────┐
│ RedisSentinel (监控层) │
│ │
│ ┌──────────┐ ┌──────────┐ ┌───────┐ │
│ │Sentinel-0│ │Sentinel-1│ │Sentinel-2│
│ └──────────┘ └──────────┘ └───────┘ │
│ │
│ Quorum: 2/3 (多数派决策) │
└──────────────────────────────────────────┘
```
---
## 📝 后续建议
### 1. 应用集成 Redis
user-rpc 服务目前还没有配置 Redis 连接,需要:
#### 修改配置文件 `app/users/rpc/etc/pb.yaml`
```yaml
Name: pb.rpc
ListenOn: 0.0.0.0:8080
# 添加 Redis 配置(使用 Sentinel 模式)
Redis:
- Host: user-redis-sentinel-sentinel:26379
Type: sentinel
MasterName: mymaster
Pass: ${REDIS_PASSWORD}
# 或使用主从模式
# Redis:
# - Host: user-redis-master:6379 # 写
# Type: node
# Pass: ${REDIS_PASSWORD}
# - Host: user-redis-replica:6379 # 读
# Type: node
# Pass: ${REDIS_PASSWORD}
Etcd:
Hosts:
- etcd-service:2379 # 需要配置实际的 Etcd 地址
Key: pb.rpc
```
#### 修改 Config 结构 `app/users/rpc/internal/config/config.go`
```go
package config
import (
"github.com/zeromicro/go-zero/core/stores/redis"
"github.com/zeromicro/go-zero/zrpc"
)
type Config struct {
zrpc.RpcServerConf
Redis redis.RedisConf // 添加 Redis 配置
}
```
#### 初始化 Redis 客户端 `app/users/rpc/internal/svc/serviceContext.go`
```go
package svc
import (
"github.com/zeromicro/go-zero/core/stores/redis"
"juwan-backend/app/users/rpc/internal/config"
)
type ServiceContext struct {
Config config.Config
Redis *redis.Redis // 添加 Redis 客户端
}
func NewServiceContext(c config.Config) *ServiceContext {
return &ServiceContext{
Config: c,
Redis: redis.MustNewRedis(c.Redis), // 初始化 Redis
}
}
```
#### 更新 Deployment 环境变量
```yaml
# deploy/k8s/service/user/user-rpc.yaml
env:
- name: DB_URI
valueFrom:
secretKeyRef:
name: user-db-app
key: uri
- name: REDIS_PASSWORD # 添加 Redis 密码
valueFrom:
secretKeyRef:
name: user-redis
key: password
```
---
### 2. Redis 性能监控
已启用 Redis Exporter(端口 9121),可以配置 Prometheus 监控:
```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: user-redis-metrics
namespace: juwan
spec:
selector:
matchLabels:
app: user-redis
endpoints:
- port: redis-exporter
interval: 30s
```
**监控指标:**
- redis_up: 实例状态
- redis_connected_clients: 连接数
- redis_memory_used_bytes: 内存使用
- redis_commands_processed_total: 命令处理数
- redis_master_repl_offset: 复制偏移量
---
### 3. 高可用性测试
#### 测试主节点故障转移
```bash
# 1. 查找当前主节点
kubectl exec -it user-redis-sentinel-sentinel-0 -n juwan -- redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster
# 2. 模拟主节点故障
kubectl delete pod user-redis-0 -n juwan
# 3. 观察 Sentinel 的故障转移过程
kubectl logs -f user-redis-sentinel-sentinel-0 -n juwan
# 4. 确认新主节点
kubectl exec -it user-redis-sentinel-sentinel-0 -n juwan -- redis-cli -p 26379 SENTINEL get-master-addr-by-name mymaster
```
#### 预期结果
- Sentinel 检测到主节点下线(5 秒)
- 2/3 Sentinel 节点达成共识(quorum=2)
- 自动提升一个从节点为主节点
- 客户端自动重连到新主节点
---
### 4. 扩展为分片集群(未来)
当缓存数据量增长需要横向扩展时,可以迁移到 RedisCluster
```yaml
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisCluster
metadata:
name: user-redis-cluster
namespace: juwan
spec:
clusterSize: 6 # 3 主 + 3 从
kubernetesConfig:
image: quay.io/opstree/redis:v7.0.12
redisLeader:
replicas: 3
redisFollower:
replicas: 3
storage:
volumeClaimTemplate:
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 5Gi
```
**迁移步骤:**
1. 部署新的 RedisCluster
2. 使用 redis-cli --cluster import 迁移数据
3. 更新应用配置指向新集群
4. 下线旧的 Sentinel 集群
---
### 5. 备份策略
Redis Operator 不提供自动备份,建议配置定时任务:
```bash
# 创建 CronJob 定期执行 BGSAVE
apiVersion: batch/v1
kind: CronJob
metadata:
name: redis-backup
namespace: juwan
spec:
schedule: "0 2 * * *" # 每天凌晨 2 点
jobTemplate:
spec:
template:
spec:
containers:
- name: backup
image: redis:7.0.12
command:
- /bin/sh
- -c
- |
redis-cli -h user-redis-master -a $REDIS_PASSWORD BGSAVE
# 将 /data/dump.rdb 上传到对象存储
restartPolicy: OnFailure
```
---
## 📚 总结
### 关键经验
1. **理解资源依赖关系:** RedisSentinel 依赖 RedisReplication,部署顺序很重要
2. **资源命名规范:** 使用清晰的名称区分不同层次的资源(如 user-redis 和 user-redis-sentinel)
3. **诊断思路:**
- 从现象(Pod 缺失)→ 资源状态(CRD 存在)→ Operator 日志 → 依赖检查
- 逐层排查,最终定位到 RedisReplication 缺失
4. **验证完整性:** 不仅要检查 Pod,还要验证 Service、StatefulSet 等所有相关资源
### 文档价值
本文档可用于:
- ✅ 团队知识传承
- ✅ 类似问题的快速排查手册
- ✅ 新成员的 Redis Operator 学习资料
- ✅ 事后复盘和经验总结
---
**最后更新时间:** 2026年2月22日
**文档状态:** ✅ 问题已解决,Redis 集群运行正常
**下一步行动:** 配置应用连接 Redis
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
+2
View File
@@ -9,6 +9,7 @@ require (
)
require (
filippo.io/edwards25519 v1.1.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
@@ -23,6 +24,7 @@ require (
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-sql-driver/mysql v1.9.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
+6
View File
@@ -1,3 +1,7 @@
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
github.com/alicebob/miniredis/v2 v2.36.1 h1:Dvc5oAnNOr7BIfPn7tF269U8DvRW1dBG2D5n0WrfYMI=
github.com/alicebob/miniredis/v2 v2.36.1/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -39,6 +43,8 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-27
View File
@@ -1,27 +0,0 @@
import { search } from "@inquirer/prompts"
import Fuse from "fuse.js";
const colors = [
{ title: 'Red', value: 'red' },
{ title: 'Green', value: 'green' },
{ title: 'Blue', value: 'blue' },
{ title: 'Yellow', value: 'yellow' },
];
(async () => {
const fuse = new Fuse(colors, {
keys: ['title'],
threshold: 0.4,
})
const color = await search({
message: "Pick a color",
source: async (term) => {
if (!term) {
return colors.map(s => s.value);
}
const result = fuse.search(term);
return result.map(s => s.item.value);
}
})
console.log(color); // => { color: 'green' }
})();