add: snowflake email kafka, refactor: redis connect

This commit is contained in:
wwweww
2026-02-25 01:16:13 +08:00
parent fdbcde13b2
commit 300058ad01
67 changed files with 3596 additions and 139 deletions
+1
View File
@@ -120,3 +120,4 @@ dist
# End of https://mrkandreev.name/snippets/gitignore-generator/#Node # End of https://mrkandreev.name/snippets/gitignore-generator/#Node
DockerFile DockerFile
.idea
+1 -1
View File
@@ -49,7 +49,7 @@ const Paths = {
const svcTypes = await fs.readdir(servicePath); const svcTypes = await fs.readdir(servicePath);
svcTypes.map(svcType => all.push({ svcTypes.map(svcType => all.push({
title: `${service} - ${svcType}`, title: `${service} - ${svcType}`,
value: path.join(servicePath, svcType, svcType === "api" ? `${service}.go` : "pb.go"), value: path.join(servicePath, svcType, svcType !== "rpc" ? `${service}.go` : "pb.go"),
})); }));
} }
return all; return all;
+34
View File
@@ -0,0 +1,34 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2

// Package main starts the email-api HTTP service: it loads the YAML config,
// builds the service context (Redis cluster + Kafka pusher) and registers the
// REST routes before serving.
package main

import (
	"flag"
	"fmt"

	"juwan-backend/app/email/api/internal/config"
	"juwan-backend/app/email/api/internal/handler"
	"juwan-backend/app/email/api/internal/svc"

	"github.com/zeromicro/go-zero/core/conf"
	"github.com/zeromicro/go-zero/rest"
)

// configFile is the path of the YAML config file, overridable with -f.
var configFile = flag.String("f", "etc/email-api.yaml", "the config file")

func main() {
	flag.Parse()

	// MustLoad exits the process when the config file is missing or invalid.
	var c config.Config
	conf.MustLoad(*configFile, &c)

	server := rest.MustNewServer(c.RestConf)
	defer server.Stop()

	// Wire shared dependencies, then mount all HTTP routes on the server.
	ctx := svc.NewServiceContext(c)
	handler.RegisterHandlers(server, ctx)

	fmt.Printf("Starting server at %s:%d...\n", c.Host, c.Port)
	server.Start() // blocks until the server is stopped
}
+19
View File
@@ -0,0 +1,19 @@
Name: email-api
Host: 0.0.0.0
Port: 8888
CacheConf:
- Host: "${REDIS_M_HOST}"
Type: node
Pass: "${REDIS_PASSWORD}"
User: "default"
- Host: "${REDIS_S_HOST}"
Type: node
Pass: "${REDIS_PASSWORD}"
User: "default"
Kmq:
Name: email-api
Brokers:
- "${KAFKA_BROKER}"
Topic: "email-task"
+16
View File
@@ -0,0 +1,16 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2

// Package config declares the typed configuration of the email-api service.
package config

import (
	"github.com/zeromicro/go-queue/kq"
	"github.com/zeromicro/go-zero/core/stores/cache"
	"github.com/zeromicro/go-zero/rest"
)

// Config is the root configuration loaded from etc/email-api.yaml.
type Config struct {
	rest.RestConf
	// CacheConf lists the Redis nodes (see the CacheConf section of the yaml).
	CacheConf cache.CacheConf
	// Kmq holds the Kafka producer settings (brokers + topic) used to publish
	// email tasks.
	Kmq kq.KqConf
}
@@ -0,0 +1,32 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package email
import (
"net/http"
"github.com/zeromicro/go-zero/rest/httpx"
"juwan-backend/app/email/api/internal/logic/email"
"juwan-backend/app/email/api/internal/svc"
"juwan-backend/app/email/api/internal/types"
)
// 发送邮箱验证码
// SendVerificationCodeHandler builds the HTTP endpoint that requests an email
// verification code. It decodes the request, delegates to the logic layer and
// writes either the JSON response or the error.
func SendVerificationCodeHandler(svcCtx *svc.ServiceContext) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()

		var req types.SendVerificationCodeReq
		if err := httpx.Parse(r, &req); err != nil {
			httpx.ErrorCtx(ctx, w, err)
			return
		}

		logic := email.NewSendVerificationCodeLogic(ctx, svcCtx)
		resp, err := logic.SendVerificationCode(&req)
		if err != nil {
			httpx.ErrorCtx(ctx, w, err)
			return
		}
		httpx.OkJsonCtx(ctx, w, resp)
	}
}
+30
View File
@@ -0,0 +1,30 @@
// Code generated by goctl. DO NOT EDIT.
// goctl 1.9.2
package handler
import (
"net/http"
email "juwan-backend/app/email/api/internal/handler/email"
"juwan-backend/app/email/api/internal/svc"
"github.com/zeromicro/go-zero/rest"
)
func RegisterHandlers(server *rest.Server, serverCtx *svc.ServiceContext) {
server.AddRoutes(
rest.WithMiddlewares(
[]rest.Middleware{serverCtx.Logger},
[]rest.Route{
{
// 发送邮箱验证码
Method: http.MethodPost,
Path: "/verification-code/send",
Handler: email.SendVerificationCodeHandler(serverCtx),
},
}...,
),
rest.WithPrefix("/api/email"),
)
}
@@ -0,0 +1,79 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package email
import (
"context"
"encoding/json"
"fmt"
"time"
"juwan-backend/app/email/api/internal/svc"
"juwan-backend/app/email/api/internal/types"
"juwan-backend/app/email/api/internal/utils"
"github.com/google/uuid"
"github.com/zeromicro/go-zero/core/logx"
)
// SendVerificationCodeLogic implements the send-email-verification-code use
// case for one request.
type SendVerificationCodeLogic struct {
	logx.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

// NewSendVerificationCodeLogic builds the logic object for one request,
// binding the request context to the logger.
func NewSendVerificationCodeLogic(ctx context.Context, svcCtx *svc.ServiceContext) *SendVerificationCodeLogic {
	return &SendVerificationCodeLogic{
		Logger: logx.WithContext(ctx),
		ctx:    ctx,
		svcCtx: svcCtx,
	}
}
// SendVerificationCode generates a 6-digit code, enforces a 60-second rate
// limit per (scene, email) pair in Redis, and publishes an email delivery task
// to Kafka. The actual email is sent asynchronously by the mq consumer.
func (l *SendVerificationCodeLogic) SendVerificationCode(req *types.SendVerificationCodeReq) (resp *types.SendVerificationCodeResp, err error) {
	if l.svcCtx.RedisCluster == nil {
		return nil, fmt.Errorf("redis not configured")
	}
	if l.svcCtx.EmailPusher == nil {
		return nil, fmt.Errorf("kafka pusher not configured")
	}

	code := utils.GenCode()
	requestID := uuid.NewString()

	// BUG FIX: the old key embedded the freshly generated code (and the email
	// twice), so no two requests ever produced the same key and the
	// "already sent" check could never fire. Key by scene+email instead, and
	// store the code as the value so a verify step can read it back.
	redisKey := fmt.Sprintf("email:verification:%s:%s", req.Scene, req.Email)

	// SetNX is atomic: it only succeeds when no unexpired code exists for this
	// key, closing the check-then-set race of the previous Get + Set pair.
	ok, setErr := l.svcCtx.RedisCluster.SetNX(l.ctx, redisKey, code, 60*time.Second).Result()
	if setErr != nil {
		return nil, setErr
	}
	if !ok {
		return nil, fmt.Errorf("verification code already sent, please wait before requesting a new one")
	}

	// Task payload consumed by the email-mq worker.
	payload := map[string]any{
		"type":      "verification_code",
		"requestId": requestID,
		"email":     req.Email,
		"scene":     req.Scene,
		"code":      code,
		"expireIn":  60, // seconds; keep in sync with the Redis TTL above
	}
	messageBytes, marshalErr := json.Marshal(payload)
	if marshalErr != nil {
		return nil, marshalErr
	}

	// Keyed by email — presumably to keep tasks for one address ordered within
	// a Kafka partition (confirm against the consumer's expectations).
	if pushErr := l.svcCtx.EmailPusher.PushWithKey(l.ctx, req.Email, string(messageBytes)); pushErr != nil {
		return nil, pushErr
	}

	return &types.SendVerificationCodeResp{
		RequestId:   requestID,
		ExpireInSec: 60,
		Message:     "verification code send task submitted",
	}, nil
}
@@ -0,0 +1,22 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package middleware
import "net/http"
type LoggerMiddleware struct {
}
func NewLoggerMiddleware() *LoggerMiddleware {
return &LoggerMiddleware{}
}
func (m *LoggerMiddleware) Handle(next http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// TODO generate middleware implement function, delete after code implementation
// Passthrough to next handler if need
next(w, r)
}
}
@@ -0,0 +1,43 @@
// Code scaffolded by goctl. Safe to edit.
// goctl 1.9.2
package svc
import (
"time"
"juwan-backend/app/email/api/internal/config"
"juwan-backend/app/email/api/internal/middleware"
"juwan-backend/common/redisx"
"github.com/redis/go-redis/v9"
"github.com/zeromicro/go-queue/kq"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/rest"
)
// ServiceContext carries the shared dependencies of the email-api handlers.
type ServiceContext struct {
	Config       config.Config
	Logger       rest.Middleware
	RedisCluster *redis.ClusterClient // nil when the Redis connection failed
	EmailPusher  *kq.Pusher           // nil when Kmq is not configured
}

// NewServiceContext builds the service context. Redis/Kafka unavailability is
// tolerated at startup: the corresponding field stays nil and the request
// logic returns an explicit error instead of the process crashing.
func NewServiceContext(c config.Config) *ServiceContext {
	// BUG FIX: the original dereferenced redisConn.Client unconditionally,
	// which panics with a nil pointer when ConnectMasterSlaveCluster fails.
	var redisCluster *redis.ClusterClient
	redisConn, err := redisx.ConnectMasterSlaveCluster(c.CacheConf, 5*time.Second)
	if err != nil {
		logx.Errorf("failed to connect redis for email-api: %v", err)
	} else if redisConn != nil {
		redisCluster = redisConn.Client
	}

	var emailPusher *kq.Pusher
	if len(c.Kmq.Brokers) > 0 && c.Kmq.Topic != "" {
		emailPusher = kq.NewPusher(c.Kmq.Brokers, c.Kmq.Topic)
	}

	return &ServiceContext{
		Config:       c,
		Logger:       middleware.NewLoggerMiddleware().Handle,
		RedisCluster: redisCluster,
		EmailPusher:  emailPusher,
	}
}
+15
View File
@@ -0,0 +1,15 @@
// Code generated by goctl. DO NOT EDIT.
// goctl 1.9.2
package types
type SendVerificationCodeReq struct {
Email string `json:"email" binding:"required,email"`
Scene string `json:"scene" binding:"required,oneof=register login reset_password bind_email"`
}
type SendVerificationCodeResp struct {
RequestId string `json:"requestId"`
ExpireInSec int64 `json:"expireInSec"`
Message string `json:"message"`
}
+16
View File
@@ -0,0 +1,16 @@
package utils
import (
"crypto/rand"
"fmt"
"math/big"
)
// GenCode returns a cryptographically random 6-digit numeric code,
// zero-padded to exactly six characters ("000000"–"999999").
func GenCode() string {
	n, err := rand.Int(rand.Reader, big.NewInt(1000000))
	if err != nil {
		// SECURITY FIX: the original silently returned the constant "000000"
		// when crypto/rand failed, handing every caller a predictable
		// verification code. A broken OS entropy source is unrecoverable, so
		// fail loudly instead.
		panic(fmt.Sprintf("crypto/rand unavailable: %v", err))
	}
	return fmt.Sprintf("%06d", n.Int64())
}
+33
View File
@@ -0,0 +1,33 @@
// Package main starts the email-mq consumer process: it loads the config and
// runs every Kafka queue consumer inside one service group.
package main

import (
	"flag"
	"fmt"

	"juwan-backend/app/email/mq/internal/config"
	"juwan-backend/app/email/mq/internal/consumer"

	"github.com/zeromicro/go-zero/core/conf"
	"github.com/zeromicro/go-zero/core/service"
)

// configFile is the path of the YAML config file, overridable with -f.
var configFile = flag.String("f", "etc/email.yaml", "the config file")

func main() {
	flag.Parse()

	var c config.Config
	conf.MustLoad(*configFile, &c)

	// SetUp initializes the process-wide facilities declared in ServiceConf
	// (logging, metrics, etc.); a failure here is fatal.
	if err := c.SetUp(); err != nil {
		panic(err)
	}

	// Run all consumers as members of one service group so they start and
	// stop together.
	serviceGroup := service.NewServiceGroup()
	defer serviceGroup.Stop()
	for _, mq := range consumer.Mqs(c) {
		serviceGroup.Add(mq)
	}

	fmt.Print("Starting email service\n")
	serviceGroup.Start() // blocks until the group is stopped
}
+18
View File
@@ -0,0 +1,18 @@
Name: email-mq
Prometheus:
Host: 0.0.0.0
Port: 4003
Path: /metrics
Kmq:
Name: email-mq
Brokers:
- my-cluster-kafka-bootstrap.kafka.svc.cluster.local:9092
Topic: email-task
Group: email-consumer-group
ForceCommit: true
CommitInOrder: false
Offset: last
Consumers: 8
Processors: 8
+11
View File
@@ -0,0 +1,11 @@
// Package config declares the typed configuration of the email-mq consumer.
package config

import (
	"github.com/zeromicro/go-queue/kq"
	"github.com/zeromicro/go-zero/core/service"
)

// Config is the root configuration loaded from etc/email.yaml.
type Config struct {
	service.ServiceConf
	// Kmq holds the Kafka consumer settings (brokers, topic, group, worker
	// counts) for the email-task queue.
	Kmq kq.KqConf
}
@@ -0,0 +1,21 @@
package consumer
import (
"context"
"juwan-backend/app/email/mq/internal/config"
"juwan-backend/app/email/mq/internal/svc"
"github.com/zeromicro/go-zero/core/service"
)
// Mqs assembles every queue-consumer service for the email-mq process.
// FIX: removed the leftover commented-out dead line (`//svcContext := ...`).
func Mqs(c config.Config) []service.Service {
	// Background context: consumers live for the whole process and are
	// stopped through the service group in main, not through this context.
	ctx := context.Background()
	svcCtx := svc.NewServiceContext(c)

	var services []service.Service
	services = append(services, Kqs(ctx, c, svcCtx)...)
	return services
}
+15
View File
@@ -0,0 +1,15 @@
package consumer
import (
"context"
"juwan-backend/app/email/mq/internal/config"
"juwan-backend/app/email/mq/internal/logic"
"juwan-backend/app/email/mq/internal/svc"
"github.com/zeromicro/go-queue/kq"
"github.com/zeromicro/go-zero/core/service"
)
// Kqs builds the Kafka queue services: one kq consumer bound to the
// verification-code handler. MustNewQueue panics on an invalid Kmq config.
func Kqs(ctx context.Context, c config.Config, svcCtx *svc.ServiceContext) []service.Service {
	return []service.Service{kq.MustNewQueue(c.Kmq, logic.NewSendVerificationCodeMq(ctx, c, svcCtx))}
}
@@ -0,0 +1,32 @@
package logic
import (
"context"
"juwan-backend/app/email/mq/internal/config"
"juwan-backend/app/email/mq/internal/svc"
"github.com/zeromicro/go-zero/core/logx"
)
// SendVerificationCodeMq consumes verification-code email tasks from Kafka.
type SendVerificationCodeMq struct {
	c      config.Config
	ctx    context.Context
	svcCtx *svc.ServiceContext // FIX: field was misspelled "svcCxt"
}

// NewSendVerificationCodeMq builds the consumer handler.
func NewSendVerificationCodeMq(ctx context.Context, c config.Config, svcCtx *svc.ServiceContext) *SendVerificationCodeMq {
	return &SendVerificationCodeMq{
		c:      c,
		ctx:    ctx,
		svcCtx: svcCtx,
	}
}

// Consume handles one Kafka message. It currently only logs the message —
// TODO: decode the JSON task and actually send the email. Returning nil
// acknowledges the message to the queue.
func (l *SendVerificationCodeMq) Consume(ctx context.Context, key, value string) error {
	// FIX: dropped the redundant `_ = key` / `_ = value` discards — both
	// parameters are used by the log line below (and unused parameters are
	// legal in Go in any case).
	logx.Infof("Consume get message key: %s, value: %s", key, value)
	return nil
}
@@ -0,0 +1,13 @@
// Package svc holds the shared dependencies of the email-mq consumers.
package svc

import "juwan-backend/app/email/mq/internal/config"

// ServiceContext carries process-wide dependencies for the consumer logic.
type ServiceContext struct {
	c config.Config // loaded service configuration
}

// NewServiceContext builds the context from the loaded configuration.
func NewServiceContext(c config.Config) *ServiceContext {
	return &ServiceContext{
		c: c,
	}
}
+10
View File
@@ -0,0 +1,10 @@
Name: snowflake.rpc
ListenOn: 0.0.0.0:8080
#Etcd:
# Hosts:
# - 127.0.0.1:2379
# Key: snowflake.rpc
Snowflake:
DatacenterId: 1
WorkerId: 0
@@ -0,0 +1,11 @@
// Package config declares the typed configuration of the snowflake RPC service.
package config

import "github.com/zeromicro/go-zero/zrpc"

// Config is the root configuration loaded from etc/snowflake.yaml.
type Config struct {
	zrpc.RpcServerConf
	// Snowflake identifies this generator instance; NewSnowflake rejects ids
	// outside [0, 31] (5 bits each).
	Snowflake struct {
		DatacenterId int64
		WorkerId     int64
	}
}
@@ -0,0 +1,33 @@
package logic
import (
"context"
"juwan-backend/app/snowflake/rpc/internal/svc"
"juwan-backend/app/snowflake/rpc/snowflake"
"github.com/zeromicro/go-zero/core/logx"
)
// NextIdLogic serves the NextId RPC: generate a single snowflake ID.
type NextIdLogic struct {
	ctx    context.Context
	svcCtx *svc.ServiceContext
	logx.Logger
}

// NewNextIdLogic builds the logic for one RPC call, binding the call context
// to the logger.
func NewNextIdLogic(ctx context.Context, svcCtx *svc.ServiceContext) *NextIdLogic {
	return &NextIdLogic{
		ctx:    ctx,
		svcCtx: svcCtx,
		Logger: logx.WithContext(ctx),
	}
}

// NextId returns one freshly generated snowflake ID.
func (l *NextIdLogic) NextId(_ *snowflake.NextIdReq) (*snowflake.NextIdResp, error) {
	id, err := l.svcCtx.Generator.NextID()
	if err != nil {
		// FIX: logx.Logger.Error is fmt.Sprint-style, not key/value pairs, so
		// the old `l.Error("generator.NextID", "err", err)` produced a garbled
		// concatenation. Use Errorf, consistent with NextIdsLogic.
		l.Errorf("generate snowflake id failed: %v", err)
		return nil, err
	}
	return &snowflake.NextIdResp{Id: id}, nil
}
@@ -0,0 +1,37 @@
package logic
import (
"context"
"errors"
"juwan-backend/app/snowflake/rpc/internal/svc"
"juwan-backend/app/snowflake/rpc/snowflake"
"github.com/zeromicro/go-zero/core/logx"
)
// NextIdsLogic serves the NextIds RPC: generate a batch of snowflake IDs.
type NextIdsLogic struct {
	ctx    context.Context
	svcCtx *svc.ServiceContext
	logx.Logger
}

// NewNextIdsLogic builds the logic for one RPC call, binding the call context
// to the logger.
func NewNextIdsLogic(ctx context.Context, svcCtx *svc.ServiceContext) *NextIdsLogic {
	return &NextIdsLogic{
		ctx:    ctx,
		svcCtx: svcCtx,
		Logger: logx.WithContext(ctx),
	}
}

// NextIds returns in.Count freshly generated IDs; the count must be in
// [1, 1000] (the generator enforces the same bound).
func (l *NextIdsLogic) NextIds(in *snowflake.NextIdsReq) (*snowflake.NextIdsResp, error) {
	count := int(in.Count)
	if count < 1 || count > 1000 {
		return nil, errors.New("count must be between 1 and 1000")
	}

	ids, err := l.svcCtx.Generator.NextIDs(count)
	if err != nil {
		l.Errorf("generate snowflake ids failed: %v", err)
		return nil, err
	}
	return &snowflake.NextIdsResp{Ids: ids}, nil
}
@@ -0,0 +1,94 @@
package generator
import (
"errors"
"sync"
"time"
)
const (
	// epoch is the custom epoch (2021-01-01T00:00:00Z) in Unix milliseconds;
	// timestamps are stored relative to it to extend the usable range.
	epoch = int64(1609459200000)

	// Bit widths of the three payload fields of an ID (classic Twitter
	// snowflake layout: 41-bit timestamp | 5 | 5 | 12).
	datacenterIdBits = uint(5)
	workerIdBits     = uint(5)
	sequenceBits     = uint(12)

	// Largest value each field can hold.
	maxDatacenterId = -1 ^ (-1 << datacenterIdBits) // 31
	maxWorkerId     = -1 ^ (-1 << workerIdBits)     // 31
	maxSequence     = -1 ^ (-1 << sequenceBits)     // 4095

	// Left-shift amounts used when packing an ID.
	workerIdShift      = sequenceBits
	datacenterIdShift  = sequenceBits + workerIdBits
	timestampLeftShift = sequenceBits + workerIdBits + datacenterIdBits
)

// Snowflake is a mutex-guarded generator of 64-bit time-ordered unique IDs.
// The zero value is not usable; construct with NewSnowflake.
type Snowflake struct {
	mu           sync.Mutex
	timestamp    int64 // last millisecond an ID was issued in
	datacenterId int64
	workerId     int64
	sequence     int64 // per-millisecond counter, wraps at maxSequence
}

// NewSnowflake validates the instance identifiers and returns a generator.
// Both ids must fit in 5 bits, i.e. lie in [0, 31].
func NewSnowflake(datacenterId, workerId int64) (*Snowflake, error) {
	if datacenterId < 0 || datacenterId > maxDatacenterId {
		return nil, errors.New("datacenter id must be between 0 and 31")
	}
	if workerId < 0 || workerId > maxWorkerId {
		return nil, errors.New("worker id must be between 0 and 31")
	}
	return &Snowflake{
		datacenterId: datacenterId,
		workerId:     workerId,
	}, nil
}

// NextID returns the next unique ID. It is safe for concurrent use and fails
// only when the wall clock moves backwards.
func (s *Snowflake) NextID() (int64, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	now := time.Now().UnixMilli()
	switch {
	case now < s.timestamp:
		// Issuing with a rewound clock could repeat IDs — refuse instead.
		return 0, errors.New("clock moved backwards")
	case now == s.timestamp:
		s.sequence = (s.sequence + 1) & maxSequence
		if s.sequence == 0 {
			// Sequence space for this millisecond is exhausted: busy-wait
			// until the clock advances.
			for now <= s.timestamp {
				now = time.Now().UnixMilli()
			}
		}
	default:
		s.sequence = 0
	}
	s.timestamp = now

	return ((now - epoch) << timestampLeftShift) |
		(s.datacenterId << datacenterIdShift) |
		(s.workerId << workerIdShift) |
		s.sequence, nil
}

// NextIDs returns a batch of count IDs; count must be in [1, 1000].
func (s *Snowflake) NextIDs(count int) ([]int64, error) {
	if count <= 0 || count > 1000 {
		return nil, errors.New("count must be between 1 and 1000")
	}

	ids := make([]int64, 0, count)
	for len(ids) < count {
		id, err := s.NextID()
		if err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	return ids, nil
}
@@ -0,0 +1,34 @@
// Code generated by goctl. DO NOT EDIT.
// goctl 1.9.2
// Source: snowflake.proto
package server
import (
"context"
"juwan-backend/app/snowflake/rpc/internal/logic"
"juwan-backend/app/snowflake/rpc/internal/svc"
"juwan-backend/app/snowflake/rpc/snowflake"
)
type SnowflakeServiceServer struct {
svcCtx *svc.ServiceContext
snowflake.UnimplementedSnowflakeServiceServer
}
func NewSnowflakeServiceServer(svcCtx *svc.ServiceContext) *SnowflakeServiceServer {
return &SnowflakeServiceServer{
svcCtx: svcCtx,
}
}
func (s *SnowflakeServiceServer) NextId(ctx context.Context, in *snowflake.NextIdReq) (*snowflake.NextIdResp, error) {
l := logic.NewNextIdLogic(ctx, s.svcCtx)
return l.NextId(in)
}
func (s *SnowflakeServiceServer) NextIds(ctx context.Context, in *snowflake.NextIdsReq) (*snowflake.NextIdsResp, error) {
l := logic.NewNextIdsLogic(ctx, s.svcCtx)
return l.NextIds(in)
}
@@ -0,0 +1,25 @@
package svc
import (
"juwan-backend/app/snowflake/rpc/internal/config"
generator "juwan-backend/app/snowflake/rpc/internal/pkg"
)
// ServiceContext carries the shared dependencies of the snowflake RPC handlers.
type ServiceContext struct {
	Config    config.Config
	Generator *generator.Snowflake // process-wide ID generator
}

// NewServiceContext builds the context. It panics when the configured
// datacenter/worker ids are out of range — a deployment error that should
// fail fast at startup.
func NewServiceContext(c config.Config) *ServiceContext {
	gen, err := generator.NewSnowflake(c.Snowflake.DatacenterId, c.Snowflake.WorkerId)
	if err != nil {
		panic(err)
	}

	return &ServiceContext{
		Config:    c,
		Generator: gen,
	}
}
+39
View File
@@ -0,0 +1,39 @@
// Package main starts the snowflake RPC (gRPC) service.
package main

import (
	"flag"
	"fmt"

	"juwan-backend/app/snowflake/rpc/internal/config"
	"juwan-backend/app/snowflake/rpc/internal/server"
	"juwan-backend/app/snowflake/rpc/internal/svc"
	"juwan-backend/app/snowflake/rpc/snowflake"

	"github.com/zeromicro/go-zero/core/conf"
	"github.com/zeromicro/go-zero/core/service"
	"github.com/zeromicro/go-zero/zrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
)

// configFile is the path of the YAML config file, overridable with -f.
var configFile = flag.String("f", "etc/snowflake.yaml", "the config file")

func main() {
	flag.Parse()

	var c config.Config
	conf.MustLoad(*configFile, &c)

	// Builds the generator; panics when the configured ids are invalid.
	ctx := svc.NewServiceContext(c)

	s := zrpc.MustNewServer(c.RpcServerConf, func(grpcServer *grpc.Server) {
		snowflake.RegisterSnowflakeServiceServer(grpcServer, server.NewSnowflakeServiceServer(ctx))

		// Server reflection (for grpcurl and similar tools) is enabled only
		// outside production.
		if c.Mode == service.DevMode || c.Mode == service.TestMode {
			reflection.Register(grpcServer)
		}
	})
	defer s.Stop()

	fmt.Printf("Starting rpc server at %s...\n", c.ListenOn)
	s.Start() // blocks until the server is stopped
}
+263
View File
@@ -0,0 +1,263 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.11
// protoc v3.19.4
// source: snowflake.proto
package snowflake
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type NextIdReq struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NextIdReq) Reset() {
*x = NextIdReq{}
mi := &file_snowflake_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NextIdReq) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NextIdReq) ProtoMessage() {}
func (x *NextIdReq) ProtoReflect() protoreflect.Message {
mi := &file_snowflake_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NextIdReq.ProtoReflect.Descriptor instead.
func (*NextIdReq) Descriptor() ([]byte, []int) {
return file_snowflake_proto_rawDescGZIP(), []int{0}
}
type NextIdResp struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NextIdResp) Reset() {
*x = NextIdResp{}
mi := &file_snowflake_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NextIdResp) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NextIdResp) ProtoMessage() {}
func (x *NextIdResp) ProtoReflect() protoreflect.Message {
mi := &file_snowflake_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NextIdResp.ProtoReflect.Descriptor instead.
func (*NextIdResp) Descriptor() ([]byte, []int) {
return file_snowflake_proto_rawDescGZIP(), []int{1}
}
func (x *NextIdResp) GetId() int64 {
if x != nil {
return x.Id
}
return 0
}
type NextIdsReq struct {
state protoimpl.MessageState `protogen:"open.v1"`
Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NextIdsReq) Reset() {
*x = NextIdsReq{}
mi := &file_snowflake_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NextIdsReq) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NextIdsReq) ProtoMessage() {}
func (x *NextIdsReq) ProtoReflect() protoreflect.Message {
mi := &file_snowflake_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NextIdsReq.ProtoReflect.Descriptor instead.
func (*NextIdsReq) Descriptor() ([]byte, []int) {
return file_snowflake_proto_rawDescGZIP(), []int{2}
}
func (x *NextIdsReq) GetCount() int32 {
if x != nil {
return x.Count
}
return 0
}
type NextIdsResp struct {
state protoimpl.MessageState `protogen:"open.v1"`
Ids []int64 `protobuf:"varint,1,rep,packed,name=ids,proto3" json:"ids,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *NextIdsResp) Reset() {
*x = NextIdsResp{}
mi := &file_snowflake_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *NextIdsResp) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NextIdsResp) ProtoMessage() {}
func (x *NextIdsResp) ProtoReflect() protoreflect.Message {
mi := &file_snowflake_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NextIdsResp.ProtoReflect.Descriptor instead.
func (*NextIdsResp) Descriptor() ([]byte, []int) {
return file_snowflake_proto_rawDescGZIP(), []int{3}
}
func (x *NextIdsResp) GetIds() []int64 {
if x != nil {
return x.Ids
}
return nil
}
var File_snowflake_proto protoreflect.FileDescriptor
const file_snowflake_proto_rawDesc = "" +
"\n" +
"\x0fsnowflake.proto\x12\tsnowflake\"\v\n" +
"\tNextIdReq\"\x1c\n" +
"\n" +
"NextIdResp\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x03R\x02id\"\"\n" +
"\n" +
"NextIdsReq\x12\x14\n" +
"\x05count\x18\x01 \x01(\x05R\x05count\"\x1f\n" +
"\vNextIdsResp\x12\x10\n" +
"\x03ids\x18\x01 \x03(\x03R\x03ids2\x83\x01\n" +
"\x10SnowflakeService\x125\n" +
"\x06NextId\x12\x14.snowflake.NextIdReq\x1a\x15.snowflake.NextIdResp\x128\n" +
"\aNextIds\x12\x15.snowflake.NextIdsReq\x1a\x16.snowflake.NextIdsRespB\rZ\v./snowflakeb\x06proto3"
var (
file_snowflake_proto_rawDescOnce sync.Once
file_snowflake_proto_rawDescData []byte
)
func file_snowflake_proto_rawDescGZIP() []byte {
file_snowflake_proto_rawDescOnce.Do(func() {
file_snowflake_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_snowflake_proto_rawDesc), len(file_snowflake_proto_rawDesc)))
})
return file_snowflake_proto_rawDescData
}
var file_snowflake_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_snowflake_proto_goTypes = []any{
(*NextIdReq)(nil), // 0: snowflake.NextIdReq
(*NextIdResp)(nil), // 1: snowflake.NextIdResp
(*NextIdsReq)(nil), // 2: snowflake.NextIdsReq
(*NextIdsResp)(nil), // 3: snowflake.NextIdsResp
}
var file_snowflake_proto_depIdxs = []int32{
0, // 0: snowflake.SnowflakeService.NextId:input_type -> snowflake.NextIdReq
2, // 1: snowflake.SnowflakeService.NextIds:input_type -> snowflake.NextIdsReq
1, // 2: snowflake.SnowflakeService.NextId:output_type -> snowflake.NextIdResp
3, // 3: snowflake.SnowflakeService.NextIds:output_type -> snowflake.NextIdsResp
2, // [2:4] is the sub-list for method output_type
0, // [0:2] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_snowflake_proto_init() }
func file_snowflake_proto_init() {
if File_snowflake_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_snowflake_proto_rawDesc), len(file_snowflake_proto_rawDesc)),
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_snowflake_proto_goTypes,
DependencyIndexes: file_snowflake_proto_depIdxs,
MessageInfos: file_snowflake_proto_msgTypes,
}.Build()
File_snowflake_proto = out.File
file_snowflake_proto_goTypes = nil
file_snowflake_proto_depIdxs = nil
}
@@ -0,0 +1,159 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.6.1
// - protoc v3.19.4
// source: snowflake.proto
package snowflake
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
SnowflakeService_NextId_FullMethodName = "/snowflake.SnowflakeService/NextId"
SnowflakeService_NextIds_FullMethodName = "/snowflake.SnowflakeService/NextIds"
)
// SnowflakeServiceClient is the client API for SnowflakeService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type SnowflakeServiceClient interface {
NextId(ctx context.Context, in *NextIdReq, opts ...grpc.CallOption) (*NextIdResp, error)
NextIds(ctx context.Context, in *NextIdsReq, opts ...grpc.CallOption) (*NextIdsResp, error)
}
type snowflakeServiceClient struct {
cc grpc.ClientConnInterface
}
func NewSnowflakeServiceClient(cc grpc.ClientConnInterface) SnowflakeServiceClient {
return &snowflakeServiceClient{cc}
}
func (c *snowflakeServiceClient) NextId(ctx context.Context, in *NextIdReq, opts ...grpc.CallOption) (*NextIdResp, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(NextIdResp)
err := c.cc.Invoke(ctx, SnowflakeService_NextId_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *snowflakeServiceClient) NextIds(ctx context.Context, in *NextIdsReq, opts ...grpc.CallOption) (*NextIdsResp, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(NextIdsResp)
err := c.cc.Invoke(ctx, SnowflakeService_NextIds_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// SnowflakeServiceServer is the server API for SnowflakeService service.
// All implementations must embed UnimplementedSnowflakeServiceServer
// for forward compatibility.
type SnowflakeServiceServer interface {
NextId(context.Context, *NextIdReq) (*NextIdResp, error)
NextIds(context.Context, *NextIdsReq) (*NextIdsResp, error)
mustEmbedUnimplementedSnowflakeServiceServer()
}
// UnimplementedSnowflakeServiceServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedSnowflakeServiceServer struct{}
func (UnimplementedSnowflakeServiceServer) NextId(context.Context, *NextIdReq) (*NextIdResp, error) {
return nil, status.Error(codes.Unimplemented, "method NextId not implemented")
}
func (UnimplementedSnowflakeServiceServer) NextIds(context.Context, *NextIdsReq) (*NextIdsResp, error) {
return nil, status.Error(codes.Unimplemented, "method NextIds not implemented")
}
func (UnimplementedSnowflakeServiceServer) mustEmbedUnimplementedSnowflakeServiceServer() {}
func (UnimplementedSnowflakeServiceServer) testEmbeddedByValue() {}
// UnsafeSnowflakeServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SnowflakeServiceServer will
// result in compilation errors.
type UnsafeSnowflakeServiceServer interface {
mustEmbedUnimplementedSnowflakeServiceServer()
}
func RegisterSnowflakeServiceServer(s grpc.ServiceRegistrar, srv SnowflakeServiceServer) {
// If the following call panics, it indicates UnimplementedSnowflakeServiceServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&SnowflakeService_ServiceDesc, srv)
}
func _SnowflakeService_NextId_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(NextIdReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SnowflakeServiceServer).NextId(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: SnowflakeService_NextId_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SnowflakeServiceServer).NextId(ctx, req.(*NextIdReq))
}
return interceptor(ctx, in, info, handler)
}
func _SnowflakeService_NextIds_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(NextIdsReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SnowflakeServiceServer).NextIds(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: SnowflakeService_NextIds_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SnowflakeServiceServer).NextIds(ctx, req.(*NextIdsReq))
}
return interceptor(ctx, in, info, handler)
}
// SnowflakeService_ServiceDesc is the grpc.ServiceDesc for SnowflakeService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var SnowflakeService_ServiceDesc = grpc.ServiceDesc{
ServiceName: "snowflake.SnowflakeService",
HandlerType: (*SnowflakeServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "NextId",
Handler: _SnowflakeService_NextId_Handler,
},
{
MethodName: "NextIds",
Handler: _SnowflakeService_NextIds_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "snowflake.proto",
}
@@ -0,0 +1,46 @@
// Code generated by goctl. DO NOT EDIT.
// goctl 1.9.2
// Source: snowflake.proto
package snowflakeservice
import (
"context"
"juwan-backend/app/snowflake/rpc/snowflake"
"github.com/zeromicro/go-zero/zrpc"
"google.golang.org/grpc"
)
type (
NextIdReq = snowflake.NextIdReq
NextIdResp = snowflake.NextIdResp
NextIdsReq = snowflake.NextIdsReq
NextIdsResp = snowflake.NextIdsResp
SnowflakeService interface {
NextId(ctx context.Context, in *NextIdReq, opts ...grpc.CallOption) (*NextIdResp, error)
NextIds(ctx context.Context, in *NextIdsReq, opts ...grpc.CallOption) (*NextIdsResp, error)
}
defaultSnowflakeService struct {
cli zrpc.Client
}
)
func NewSnowflakeService(cli zrpc.Client) SnowflakeService {
return &defaultSnowflakeService{
cli: cli,
}
}
func (m *defaultSnowflakeService) NextId(ctx context.Context, in *NextIdReq, opts ...grpc.CallOption) (*NextIdResp, error) {
client := snowflake.NewSnowflakeServiceClient(m.cli.Conn())
return client.NextId(ctx, in, opts...)
}
func (m *defaultSnowflakeService) NextIds(ctx context.Context, in *NextIdsReq, opts ...grpc.CallOption) (*NextIdsResp, error) {
client := snowflake.NewSnowflakeServiceClient(m.cli.Conn())
return client.NextIds(ctx, in, opts...)
}
@@ -37,23 +37,23 @@ func (l *RegisterLogic) Register(req *types.RegisterReq) (resp *types.RegisterRe
Username: req.Username, Username: req.Username,
}) })
if err == nil && existingUser != nil { if err == nil && existingUser != nil {
return nil, errors.New("用户已存在") return nil, errors.New("user already exists")
} }
// 生成用户ID // 生成用户ID
userId, err := uuid.NewRandom() userId, err := uuid.NewRandom()
if err != nil { if err != nil {
return nil, errors.New("注册失败:无法生成用户ID") return nil, errors.New("generate user ID failed")
} }
// 加密密码 // 加密密码
hashedPassword, err := utils.HashPassword(req.Password) hashedPassword, err := utils.HashPassword(req.Password)
if err != nil { if err != nil {
return nil, errors.New("注册失败:密码加密失败") return nil, errors.New("hash password failed")
} }
// 创建新用户 // 创建新用户
newUser, err := l.svcCtx.UserRpc.AddUsers(l.ctx, &pb.AddUsersReq{ _res, err := l.svcCtx.UserRpc.AddUsers(l.ctx, &pb.AddUsersReq{
UserId: userId.String(), UserId: userId.String(),
Username: req.Username, Username: req.Username,
Passwd: hashedPassword, Passwd: hashedPassword,
@@ -62,14 +62,9 @@ func (l *RegisterLogic) Register(req *types.RegisterReq) (resp *types.RegisterRe
}) })
if err != nil { if err != nil {
l.Errorf("AddUsers failed: %v", err) l.Errorf("AddUsers failed: %v", err)
return nil, errors.New("注册失败:创建用户失败") return nil, errors.New("add user failed")
} }
// 返回响应 // 返回响应
return &types.RegisterResp{ return &types.RegisterResp{}, nil
UserId: int64(newUser.), // RPC 返回的可能是用户信息,这里简化处理
Username: req.Username,
Email: req.Email,
Message: "注册成功",
}, nil
} }
+13 -2
View File
@@ -8,9 +8,20 @@ Prometheus:
DataSource: "${DB_URI}?sslmode=disable" DataSource: "${DB_URI}?sslmode=disable"
SnowflakeRpcConf:
Target: k8s://juwan/snowflake-svc:8080
DB:
Master: "postgresql://${PD_USERNAME}:${DB_PASSWORD}@user-db-rw.juwan:${DB_PORT}/${DB_NAME}?sslmode=disable"
Slave: "postgresql://${PD_USERNAME}:${DB_PASSWORD}@user-db-ro.juwan:${DB_PORT}/${DB_NAME}?sslmode=disable"
CacheConf: CacheConf:
- Host: "${REDIS_HOST}" - Host: "${REDIS_M_HOST}"
Type: cluster Type: node
Pass: "${REDIS_PASSWORD}"
User: "default"
- Host: "${REDIS_S_HOST}"
Type: node
Pass: "${REDIS_PASSWORD}" Pass: "${REDIS_PASSWORD}"
User: "default" User: "default"
+7 -2
View File
@@ -13,6 +13,11 @@ type JwtConfig struct {
type Config struct { type Config struct {
zrpc.RpcServerConf zrpc.RpcServerConf
DataSource string `json:"dataSource"` DataSource string `json:"dataSource"`
CacheConf cache.CacheConf DB struct {
Jwt JwtConfig `json:"jwt"` Master string
Slave string
}
CacheConf cache.CacheConf
Jwt JwtConfig `json:"jwt"`
SnowflakeRpcConf zrpc.RpcClientConf
} }
@@ -25,9 +25,7 @@ func NewGetUserByUsernameLogic(ctx context.Context, svcCtx *svc.ServiceContext)
} }
func (l *GetUserByUsernameLogic) GetUserByUsername(in *pb.GetUserByUsernameReq) (*pb.GetUserByUsernameResp, error) { func (l *GetUserByUsernameLogic) GetUserByUsername(in *pb.GetUserByUsernameReq) (*pb.GetUserByUsernameResp, error) {
// todo: add your logic here and delete this line user, err := l.svcCtx.UsersModelRO.FindOneByUsername(l.ctx, in.Username)
user, err := l.svcCtx.UsersModel.FindOneByUsername(l.ctx, in.Username)
pbUsers := &pb.Users{} pbUsers := &pb.Users{}
converter.StructToStruct(user, pbUsers) converter.StructToStruct(user, pbUsers)
if err == nil || user != nil { if err == nil || user != nil {
@@ -5,6 +5,7 @@ import (
"juwan-backend/app/users/rpc/internal/svc" "juwan-backend/app/users/rpc/internal/svc"
"juwan-backend/app/users/rpc/pb" "juwan-backend/app/users/rpc/pb"
"juwan-backend/common/converter"
"github.com/zeromicro/go-zero/core/logx" "github.com/zeromicro/go-zero/core/logx"
) )
@@ -25,6 +26,12 @@ func NewGetUsersByIdLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetU
func (l *GetUsersByIdLogic) GetUsersById(in *pb.GetUsersByIdReq) (*pb.GetUsersByIdResp, error) { func (l *GetUsersByIdLogic) GetUsersById(in *pb.GetUsersByIdReq) (*pb.GetUsersByIdResp, error) {
// todo: add your logic here and delete this line // todo: add your logic here and delete this line
user, err := l.svcCtx.UsersModelRO.FindOne(l.ctx, in.Id)
if err != nil {
return nil, err
}
pbUser := &pb.Users{}
converter.StructToStruct(&user, &pbUser)
return &pb.GetUsersByIdResp{}, nil return &pb.GetUsersByIdResp{Users: pbUser}, nil
} }
@@ -32,11 +32,11 @@ var (
type ( type (
usersModel interface { usersModel interface {
Insert(ctx context.Context, data *Users) (sql.Result, error) Insert(ctx context.Context, data *Users) (sql.Result, error)
FindOne(ctx context.Context, userId string) (*Users, error) FindOne(ctx context.Context, userId int64) (*Users, error)
FindOneByPhone(ctx context.Context, phone string) (*Users, error) FindOneByPhone(ctx context.Context, phone string) (*Users, error)
FindOneByUsername(ctx context.Context, username string) (*Users, error) FindOneByUsername(ctx context.Context, username string) (*Users, error)
Update(ctx context.Context, data *Users) error Update(ctx context.Context, data *Users) error
Delete(ctx context.Context, userId string) error Delete(ctx context.Context, userId int64) error
} }
defaultUsersModel struct { defaultUsersModel struct {
@@ -45,7 +45,7 @@ type (
} }
Users struct { Users struct {
UserId string `db:"user_id"` UserId int64 `db:"user_id"`
Username string `db:"username"` Username string `db:"username"`
Passwd string `db:"passwd"` Passwd string `db:"passwd"`
Nickname string `db:"nickname"` Nickname string `db:"nickname"`
@@ -66,7 +66,7 @@ func newUsersModel(conn sqlx.SqlConn, c cache.CacheConf, opts ...cache.Option) *
} }
} }
func (m *defaultUsersModel) Delete(ctx context.Context, userId string) error { func (m *defaultUsersModel) Delete(ctx context.Context, userId int64) error {
data, err := m.FindOne(ctx, userId) data, err := m.FindOne(ctx, userId)
if err != nil { if err != nil {
return err return err
@@ -82,7 +82,7 @@ func (m *defaultUsersModel) Delete(ctx context.Context, userId string) error {
return err return err
} }
func (m *defaultUsersModel) FindOne(ctx context.Context, userId string) (*Users, error) { func (m *defaultUsersModel) FindOne(ctx context.Context, userId int64) (*Users, error) {
publicUsersUserIdKey := fmt.Sprintf("%s%v", cachePublicUsersUserIdPrefix, userId) publicUsersUserIdKey := fmt.Sprintf("%s%v", cachePublicUsersUserIdPrefix, userId)
var resp Users var resp Users
err := m.QueryRowCtx(ctx, &resp, publicUsersUserIdKey, func(ctx context.Context, conn sqlx.SqlConn, v any) error { err := m.QueryRowCtx(ctx, &resp, publicUsersUserIdKey, func(ctx context.Context, conn sqlx.SqlConn, v any) error {
+20 -16
View File
@@ -1,10 +1,12 @@
package svc package svc
import ( import (
"context" "juwan-backend/app/snowflake/rpc/snowflake"
"juwan-backend/app/users/rpc/internal/config" "juwan-backend/app/users/rpc/internal/config"
"juwan-backend/app/users/rpc/internal/models" "juwan-backend/app/users/rpc/internal/models"
"juwan-backend/app/users/rpc/internal/utils" "juwan-backend/app/users/rpc/internal/utils"
"juwan-backend/common/redisx"
"juwan-backend/common/snowflakex"
"time" "time"
"github.com/redis/go-redis/v9" "github.com/redis/go-redis/v9"
@@ -14,30 +16,30 @@ import (
type ServiceContext struct { type ServiceContext struct {
Config config.Config Config config.Config
UsersModel models.UsersModel UsersModelRW models.UsersModel
UsersModelRO models.UsersModel
RedisCluster *redis.ClusterClient RedisCluster *redis.ClusterClient
Snowflake snowflake.SnowflakeServiceClient
JwtManager *utils.JwtManager JwtManager *utils.JwtManager
} }
func NewServiceContext(c config.Config) *ServiceContext { func NewServiceContext(c config.Config) *ServiceContext {
conn := sqlx.NewSqlConn("postgres", c.DataSource) RWDBConn := sqlx.NewSqlConn("postgres", c.DB.Master)
RODBConn := sqlx.NewSqlConn("postgres", c.DB.Slave)
logx.Infof("success to connect to postgres~") logx.Infof("success to connect to postgres~")
// Initialize Redis Cluster client from CacheConf // Initialize Redis Cluster client from CacheConf
var redisCluster *redis.ClusterClient redisConn, err := redisx.ConnectMasterSlaveCluster(c.CacheConf, 5*time.Second)
if len(c.CacheConf) > 0 { redisCluster := redisConn.Client
redisCluster = redis.NewClusterClient(&redis.ClusterOptions{ if redisCluster != nil {
Addrs: []string{c.CacheConf[0].Host}, if err != nil {
Password: c.CacheConf[0].Pass,
})
// Test Redis Cluster connection
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := redisCluster.Ping(ctx).Err(); err != nil {
logx.Errorf("failed to connect to redis cluster: %v", err) logx.Errorf("failed to connect to redis cluster: %v", err)
} else { } else {
logx.Infof("success to connect to redis cluster~") if redisConn.HasSlave {
logx.Infof("success to connect to redis master/slave (M: %s, S: %s)", redisConn.MasterHost, redisConn.SlaveHost)
} else {
logx.Infof("success to connect to redis master (M: %s), slave not configured", redisConn.MasterHost)
}
} }
} }
@@ -46,8 +48,10 @@ func NewServiceContext(c config.Config) *ServiceContext {
return &ServiceContext{ return &ServiceContext{
Config: c, Config: c,
UsersModel: models.NewUsersModel(conn, c.CacheConf), UsersModelRW: models.NewUsersModel(RWDBConn, c.CacheConf),
UsersModelRO: models.NewUsersModel(RODBConn, c.CacheConf),
RedisCluster: redisCluster, RedisCluster: redisCluster,
JwtManager: jwtManager, JwtManager: jwtManager,
Snowflake: snowflakex.NewClient(c.SnowflakeRpcConf),
} }
} }
-29
View File
@@ -1,29 +0,0 @@
package converter
import (
"app/users/rpc/internal/models"
"app/users/rpc/pb"
)
// UserModelToPb 将 Users Model 转换为 protobuf Users
// 使用通用转换函数,自动处理所有字段
func UserModelToPb(user *models.Users) *pb.Users {
if user == nil {
return nil
}
pbUser := &pb.Users{}
_ = StructToStruct(user, pbUser)
return pbUser
}
// UserModelsToPb 将多个 Users Model 转换为 protobuf Users
// 使用通用转换函数,自动处理所有元素
func UserModelsToPb(users []*models.Users) []*pb.Users {
if len(users) == 0 {
return []*pb.Users{}
}
result, _ := SliceToSlice(users, []*pb.Users{})
return result.([]*pb.Users)
}
+96
View File
@@ -0,0 +1,96 @@
package redisx
import (
"context"
"time"
"github.com/redis/go-redis/v9"
"github.com/zeromicro/go-zero/core/stores/cache"
)
// MasterSlaveCluster bundles a go-redis cluster client with the
// master/slave topology it was built from.
type MasterSlaveCluster struct {
	Client     *redis.ClusterClient // underlying client; nil when no node was configured
	MasterHost string               // address of the write (master) node
	SlaveHost  string               // address of the read node; only meaningful when HasSlave is true
	HasSlave   bool                 // true when a second (slave) node was configured
}
// NewMasterSlaveCluster builds a redis ClusterClient fronting a simple
// master/slave pair described by cacheConf.
//
// The first non-empty node in cacheConf is the master; the second (if any)
// is the read-only slave. A static ClusterSlots function maps the whole slot
// range (0-16383) onto these nodes so the go-redis cluster client can route
// read-only commands to the slave (ReadOnly) while writes go to the master.
//
// When cacheConf has no usable node, an empty MasterSlaveCluster (nil Client)
// is returned; callers must check Client before use.
//
// Fix: the original populated SlaveHost with the master's address even when
// no slave was configured, misreporting the topology. SlaveHost is now set
// only when a second node actually exists.
func NewMasterSlaveCluster(cacheConf cache.CacheConf) *MasterSlaveCluster {
	cacheConf = filterCacheConf(cacheConf)
	if len(cacheConf) == 0 {
		return &MasterSlaveCluster{}
	}

	master := cacheConf[0]
	hasSlave := len(cacheConf) > 1

	cluster := &MasterSlaveCluster{
		MasterHost: master.Host,
		HasSlave:   hasSlave,
	}
	nodes := []redis.ClusterNode{{Addr: master.Host}}
	if hasSlave {
		cluster.SlaveHost = cacheConf[1].Host
		nodes = append(nodes, redis.ClusterNode{Addr: cluster.SlaveHost})
	}

	cluster.Client = redis.NewClusterClient(&redis.ClusterOptions{
		Addrs:    []string{master.Host},
		Username: master.User,
		Password: master.Pass,
		// Only route read-only commands away from the master when a slave exists.
		ReadOnly: hasSlave,
		ClusterSlots: func(ctx context.Context) ([]redis.ClusterSlot, error) {
			return []redis.ClusterSlot{{
				Start: 0,
				End:   16383,
				Nodes: nodes,
			}}, nil
		},
	})
	return cluster
}
// ConnectMasterSlaveCluster builds the master/slave cluster client and
// verifies connectivity with one PING bounded by timeout. The cluster value
// is returned even when the PING fails, so callers can still inspect its
// topology fields alongside the error.
func ConnectMasterSlaveCluster(cacheConf cache.CacheConf, timeout time.Duration) (*MasterSlaveCluster, error) {
	cluster := NewMasterSlaveCluster(cacheConf)
	if cluster == nil || cluster.Client == nil {
		// Nothing was configured, so there is nothing to ping.
		return cluster, nil
	}

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	err := cluster.Ping(ctx)
	return cluster, err
}
// Ping checks connectivity to the cluster. A nil receiver or nil client is
// treated as "nothing configured" and reports success.
func (m *MasterSlaveCluster) Ping(ctx context.Context) error {
	if m == nil {
		return nil
	}
	if m.Client == nil {
		return nil
	}
	return m.Client.Ping(ctx).Err()
}
// filterCacheConf drops entries whose Host is empty, preserving order.
// An empty input is returned unchanged.
func filterCacheConf(cacheConf cache.CacheConf) cache.CacheConf {
	if len(cacheConf) == 0 {
		return cacheConf
	}
	kept := make(cache.CacheConf, 0, len(cacheConf))
	for _, node := range cacheConf {
		if node.Host != "" {
			kept = append(kept, node)
		}
	}
	return kept
}
+11
View File
@@ -0,0 +1,11 @@
package snowflakex
import (
"juwan-backend/app/snowflake/rpc/snowflake"
"github.com/zeromicro/go-zero/zrpc"
)
// NewClient builds a snowflake RPC client from the given zrpc client config.
// It panics (via MustNewClient) when the underlying connection cannot be set up.
func NewClient(conf zrpc.RpcClientConf) snowflake.SnowflakeServiceClient {
	cli := zrpc.MustNewClient(conf)
	return snowflake.NewSnowflakeServiceClient(cli.Conn())
}
+184
View File
@@ -0,0 +1,184 @@
# Operator 安装与示例使用
本文档提供 Strimzi Operator 与 MongoDB Community Operator 的两种安装方式:
- Helm 安装
- kubectl 安装
> 示例资源文件位于 `deploy/example`,默认使用 `juwan` 命名空间。
> 请先确保你的 Operator 能 watch 到 `juwan`,否则请改 namespace 或调整 Operator watch 范围。
## 1) Strimzi Operator(Kafka)
### 1.1 使用 Helm 安装
```bash
kubectl create namespace kafka
helm repo add strimzi https://strimzi.io/charts/
helm repo update
helm install strimzi-kafka-operator strimzi/strimzi-kafka-operator -n kafka
```
### 1.2 使用 kubectl 安装
```bash
kubectl create namespace kafka
kubectl apply -f https://strimzi.io/install/latest?namespace=kafka -n kafka
```
### 1.3 安装验证
```bash
kubectl get pods -n kafka
kubectl get crd | grep kafka.strimzi.io
```
### 1.4 应用 Kafka 示例
```bash
kubectl create namespace juwan
kubectl apply -f deploy/example/kafka-strimzi-example.yaml
kubectl get kafka,kafkatopic,kafkauser -n juwan
```
## 2) MongoDB Community Operator
### 2.1 使用 Helm 安装
```bash
kubectl create namespace mongodb
helm repo add mongodb https://mongodb.github.io/helm-charts
helm repo update
helm install mongodb-kubernetes-operator mongodb/community-operator -n mongodb
```
### 2.2 使用 kubectl 安装
```bash
kubectl create namespace mongodb
kubectl apply -f https://raw.githubusercontent.com/mongodb/mongodb-kubernetes-operator/master/config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml
kubectl apply -k https://github.com/mongodb/mongodb-kubernetes-operator/config/rbac/
kubectl apply -k https://github.com/mongodb/mongodb-kubernetes-operator/config/manager/
```
### 2.3 安装验证
```bash
kubectl get pods -n mongodb
kubectl get crd | grep mongodbcommunity.mongodb.com
```
### 2.4 应用 MongoDB 示例
```bash
kubectl create namespace juwan
kubectl apply -f deploy/example/mongodb-community-example.yaml
kubectl get mongodbcommunity -n juwan
```
## 3) MongoDB:哨兵集群与分片集群搭建
### 3.1 关于“哨兵集群”的说明
MongoDB 没有 Redis Sentinel 的独立哨兵组件。
MongoDB 的高可用由 **Replica Set(副本集)** 原生完成(自动主从切换、故障恢复)。
因此在 MongoDB 场景里,“哨兵集群”通常对应为“副本集高可用集群”。
### 3.2 MongoDB“哨兵等价”方案:副本集高可用
本仓库提供了高可用副本集 YAML:`deploy/example/mongodb-ha-replicaset-example.yaml`
```bash
kubectl create namespace juwan
kubectl apply -f deploy/example/mongodb-ha-replicaset-example.yaml
kubectl get mongodbcommunity -n juwan
```
查看副本集状态(任选一个 Pod 进入):
```bash
kubectl get pods -n juwan
kubectl exec -it -n juwan <mongodb-pod-name> -- mongosh --eval "rs.status()"
```
生产建议:
- 成员数保持奇数(3/5/7)
- 使用持久化卷(PVC),不要用临时盘
- 跨可用区调度(反亲和)
- 开启备份与监控
### 3.3 MongoDB 分片集群架构(Sharded Cluster)
分片集群由三层组成:
- Config Server ReplicaSet(保存分片元数据,建议 3 节点)
- Shard ReplicaSet(每个分片都是副本集,建议每分片 3 节点)
- Mongos(路由层,对业务暴露统一入口)
### 3.4 分片集群搭建步骤(kubectl 方式)
> 说明:MongoDB Community Operator 主要用于副本集管理。分片集群在社区实践中通常采用“手动编排(StatefulSet/Service+ mongosh 初始化”。
本仓库提供了分片集群基础编排 YAML:`deploy/example/mongodb-sharded-cluster-example.yaml`
```bash
kubectl create namespace juwan
kubectl apply -f deploy/example/mongodb-sharded-cluster-example.yaml
kubectl get pods,svc -n juwan
```
1) 部署 Config Server 副本集(3 节点)
- 使用 StatefulSet + Headless Service 部署 `mongod --configsvr --replSet cfg-rs`
1) 部署 Shard 副本集(例如 `shard1-rs``shard2-rs`,每个 3 节点)
- 使用 StatefulSet + Headless Service 部署 `mongod --shardsvr --replSet <shard-rs-name>`
1) 部署 Mongos 路由层
- Deployment 部署 `mongos --configdb cfg-rs/<cfg-0>:27019,<cfg-1>:27019,<cfg-2>:27019`
1) 初始化各副本集
```bash
# 初始化 Config Server RS
kubectl exec -it -n juwan <cfg-pod-0> -- mongosh --port 27019 --eval 'rs.initiate({_id:"cfg-rs",configsvr:true,members:[{_id:0,host:"cfg-0.cfg-svc.juwan.svc.cluster.local:27019"},{_id:1,host:"cfg-1.cfg-svc.juwan.svc.cluster.local:27019"},{_id:2,host:"cfg-2.cfg-svc.juwan.svc.cluster.local:27019"}]})'
# 初始化 shard1 RS
kubectl exec -it -n juwan <shard1-pod-0> -- mongosh --port 27018 --eval 'rs.initiate({_id:"shard1-rs",members:[{_id:0,host:"shard1-0.shard1-svc.juwan.svc.cluster.local:27018"},{_id:1,host:"shard1-1.shard1-svc.juwan.svc.cluster.local:27018"},{_id:2,host:"shard1-2.shard1-svc.juwan.svc.cluster.local:27018"}]})'
# 初始化 shard2 RS
kubectl exec -it -n juwan <shard2-pod-0> -- mongosh --port 27018 --eval 'rs.initiate({_id:"shard2-rs",members:[{_id:0,host:"shard2-0.shard2-svc.juwan.svc.cluster.local:27018"},{_id:1,host:"shard2-1.shard2-svc.juwan.svc.cluster.local:27018"},{_id:2,host:"shard2-2.shard2-svc.juwan.svc.cluster.local:27018"}]})'
```
1) 通过 Mongos 注册分片并启用分片
```bash
kubectl exec -it -n juwan <mongos-pod-name> -- mongosh --port 27017 --eval 'sh.addShard("shard1-rs/shard1-0.shard1-svc.juwan.svc.cluster.local:27018,shard1-1.shard1-svc.juwan.svc.cluster.local:27018,shard1-2.shard1-svc.juwan.svc.cluster.local:27018")'
kubectl exec -it -n juwan <mongos-pod-name> -- mongosh --port 27017 --eval 'sh.addShard("shard2-rs/shard2-0.shard2-svc.juwan.svc.cluster.local:27018,shard2-1.shard2-svc.juwan.svc.cluster.local:27018,shard2-2.shard2-svc.juwan.svc.cluster.local:27018")'
kubectl exec -it -n juwan <mongos-pod-name> -- mongosh --port 27017 --eval 'sh.enableSharding("appdb")'
kubectl exec -it -n juwan <mongos-pod-name> -- mongosh --port 27017 --eval 'sh.shardCollection("appdb.user_events", {"userId": "hashed"})'
```
1) 验证分片状态
```bash
kubectl exec -it -n juwan <mongos-pod-name> -- mongosh --port 27017 --eval 'sh.status()'
```
## 4) 卸载(可选)
### Strimzi(Helm 安装场景)
```bash
helm uninstall strimzi-kafka-operator -n kafka
```
### MongoDB Operator(Helm 安装场景)
```bash
helm uninstall mongodb-kubernetes-operator -n mongodb
```
+80
View File
@@ -0,0 +1,80 @@
# Strimzi Kafka 集群示例
# 前提:已安装 Strimzi Operator,且 Operator 具备对本命名空间的 watch 权限。
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
name: juwan-kafka
namespace: juwan # 示例业务命名空间
spec:
kafka:
version: 3.9.0 # Kafka Broker 版本
replicas: 1 # 开发环境可用;生产环境建议 >= 3
listeners:
- name: plain
port: 9092
type: internal # 仅集群内部访问
tls: false # 明文 listener,内网调试方便
- name: tls
port: 9093
type: internal
tls: true # TLS listener,推荐业务接入使用
config:
# 单副本容错参数(仅适合开发环境)
offsets.topic.replication.factor: 1
transaction.state.log.replication.factor: 1
transaction.state.log.min.isr: 1
default.replication.factor: 1
min.insync.replicas: 1
storage:
type: ephemeral # 临时存储,Pod 重建会丢数据;生产建议 persistent-claim
zookeeper:
replicas: 1 # 开发环境可用;生产环境建议 >= 3
storage:
type: ephemeral
# 开启 Topic/User Operator,便于声明式管理 Topic 和账号
entityOperator:
topicOperator: {}
userOperator: {}
---
# 业务 Topic 示例
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: user-events # 用户事件主题
namespace: juwan
labels:
strimzi.io/cluster: juwan-kafka # 关联 Kafka 集群名
spec:
partitions: 3 # 分区数,决定并行消费能力
replicas: 1 # 副本数,开发环境示例
config:
retention.ms: 604800000 # 7 天
segment.bytes: 1073741824 # 1GiB
---
# Kafka 用户与 ACL 示例
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaUser
metadata:
name: app-producer # 应用侧生产者账号
namespace: juwan
labels:
strimzi.io/cluster: juwan-kafka
spec:
authentication:
type: tls # 生成 TLS 证书凭据 Secret
authorization:
type: simple
acls:
- resource:
type: topic
name: user-events
patternType: literal
operations:
- Read
- Write
- resource:
type: group
name: app-consumer-group
patternType: literal
operations:
- Read
@@ -0,0 +1,36 @@
# MongoDB 应用用户密码示例(请改为更安全的值,或对接外部 Secret 管理)
apiVersion: v1
kind: Secret
metadata:
name: mongodb-app-user-password
namespace: juwan # 示例业务命名空间
type: Opaque
stringData:
password: ChangeMe123456 # 示例明文,仅用于演示
---
# MongoDB Community Operator 自定义资源示例
apiVersion: mongodbcommunity.mongodb.com/v1
kind: MongoDBCommunity
metadata:
name: juwan-mongodb
namespace: juwan
spec:
members: 3 # 副本集成员数,生产建议保持奇数
type: ReplicaSet
version: "7.0.12" # MongoDB 版本
security:
authentication:
modes:
- SCRAM # 启用用户名密码认证
users:
- name: app-user # 业务账号
db: admin
passwordSecretRef:
name: mongodb-app-user-password # 引用上方 Secret
roles:
- name: readWrite
db: appdb # 对 appdb 库授予读写
scramCredentialsSecretName: app-user-scram # Operator 生成的凭据 Secret
additionalMongodConfig:
# 示例:开启 WiredTiger 日志压缩
storage.wiredTiger.engineConfig.journalCompressor: zlib
@@ -0,0 +1,46 @@
# MongoDB 高可用(副本集)示例
# 说明:MongoDB 没有 Redis Sentinel 组件;副本集即其高可用机制。
apiVersion: v1
kind: Secret
metadata:
name: mongodb-ha-app-user-password
namespace: juwan
type: Opaque
stringData:
password: ChangeMe_ReallyStrongPassword
---
apiVersion: mongodbcommunity.mongodb.com/v1
kind: MongoDBCommunity
metadata:
name: juwan-mongodb-ha
namespace: juwan
spec:
members: 3
type: ReplicaSet
version: "7.0.12"
# 生产建议开启持久化(具体 storageClassName 按集群调整)
statefulSet:
spec:
volumeClaimTemplates:
- metadata:
name: data-volume
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 20Gi
security:
authentication:
modes:
- SCRAM
users:
- name: app-user
db: admin
passwordSecretRef:
name: mongodb-ha-app-user-password
roles:
- name: readWrite
db: appdb
scramCredentialsSecretName: app-user-scram
additionalMongodConfig:
storage.wiredTiger.engineConfig.journalCompressor: zlib
@@ -0,0 +1,218 @@
# MongoDB 分片集群最小示例(ConfigRS + 2 个 ShardRS + Mongos
# 使用方式:
# 1) 先 apply 本文件
# 2) 按文档执行 rs.initiate / sh.addShard / sh.enableSharding
# 注意:本示例侧重结构演示,生产环境请补齐资源限制、反亲和、PDB、备份与监控。
---
apiVersion: v1
kind: Service
metadata:
name: cfg-svc
namespace: juwan
spec:
clusterIP: None
selector:
app: mongo-cfg
ports:
- name: mongo
port: 27019
targetPort: 27019
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: cfg
namespace: juwan
spec:
serviceName: cfg-svc
replicas: 3
selector:
matchLabels:
app: mongo-cfg
template:
metadata:
labels:
app: mongo-cfg
spec:
containers:
- name: mongod
image: mongo:7.0
args:
[
"--configsvr",
"--replSet",
"cfg-rs",
"--port",
"27019",
"--bind_ip_all",
]
ports:
- containerPort: 27019
name: mongo
volumeMounts:
- name: data
mountPath: /data/db
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: Service
metadata:
name: shard1-svc
namespace: juwan
spec:
clusterIP: None
selector:
app: mongo-shard1
ports:
- name: mongo
port: 27018
targetPort: 27018
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: shard1
namespace: juwan
spec:
serviceName: shard1-svc
replicas: 3
selector:
matchLabels:
app: mongo-shard1
template:
metadata:
labels:
app: mongo-shard1
spec:
containers:
- name: mongod
image: mongo:7.0
args:
[
"--shardsvr",
"--replSet",
"shard1-rs",
"--port",
"27018",
"--bind_ip_all",
]
ports:
- containerPort: 27018
name: mongo
volumeMounts:
- name: data
mountPath: /data/db
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: Service
metadata:
name: shard2-svc
namespace: juwan
spec:
clusterIP: None
selector:
app: mongo-shard2
ports:
- name: mongo
port: 27018
targetPort: 27018
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: shard2
namespace: juwan
spec:
serviceName: shard2-svc
replicas: 3
selector:
matchLabels:
app: mongo-shard2
template:
metadata:
labels:
app: mongo-shard2
spec:
containers:
- name: mongod
image: mongo:7.0
args:
[
"--shardsvr",
"--replSet",
"shard2-rs",
"--port",
"27018",
"--bind_ip_all",
]
ports:
- containerPort: 27018
name: mongo
volumeMounts:
- name: data
mountPath: /data/db
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 20Gi
---
apiVersion: v1
kind: Service
metadata:
name: mongos
namespace: juwan
spec:
selector:
app: mongos
ports:
- name: mongo
port: 27017
targetPort: 27017
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: mongos
namespace: juwan
spec:
replicas: 2
selector:
matchLabels:
app: mongos
template:
metadata:
labels:
app: mongos
spec:
containers:
- name: mongos
image: mongo:7.0
args:
- "mongos"
- "--configdb"
- "cfg-rs/cfg-0.cfg-svc.juwan.svc.cluster.local:27019,cfg-1.cfg-svc.juwan.svc.cluster.local:27019,cfg-2.cfg-svc.juwan.svc.cluster.local:27019"
- "--bind_ip_all"
- "--port"
- "27017"
ports:
- containerPort: 27017
name: mongo
+33
View File
@@ -0,0 +1,33 @@
apiVersion: v1
kind: Service
metadata:
  name: snowflake-svc  # must match serviceName in the snowflake StatefulSet
namespace: juwan
spec:
  clusterIP: None  # Kubernetes field names are camelCase; "ClusterIP" is rejected by the API server
selector:
app: snowflake
ports:
- port: 9000
targetPort: 9000
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: snowflake
namespace: juwan
spec:
serviceName: snowflake-svc
replicas: 3
selector:
matchLabels:
app: snowflake
template:
metadata:
labels:
app: snowflake
spec:
containers:
- name: snowflake
          image:  # TODO: set the snowflake container image reference
+75
View File
@@ -0,0 +1,75 @@
# apiVersion: kafka.strimzi.io/v1
# kind: KafkaNodePool
# metadata:
# name: kafka-pool
# namespace: kafka
# labels:
# strimzi.io/cluster: my-cluster
# spec:
# replicas: 3
# roles:
# - controller
# - broker
# storage:
# type: jbod
# volumes:
# - id: 0
# type: persistent-claim
# size: 100Gi
# deleteClaim: false
# resources:
# requests:
# memory: 2Gi
# cpu: "1"
# limits:
# memory: 4Gi
# cpu: "2"
# ---
apiVersion: kafka.strimzi.io/v1
kind: KafkaNodePool
metadata:
name: controller-pool
namespace: kafka
labels:
strimzi.io/cluster: my-cluster
spec:
replicas: 3
roles:
- controller
storage:
type: persistent-claim
size: 10Gi
deleteClaim: false
resources:
requests:
memory: 1Gi
cpu: "0.5"
limits:
memory: 2Gi
cpu: "1"
---
apiVersion: kafka.strimzi.io/v1
kind: KafkaNodePool
metadata:
name: broker-pool
namespace: kafka
labels:
strimzi.io/cluster: my-cluster
spec:
replicas: 3
roles:
- broker
storage:
type: jbod
volumes:
- id: 0
type: persistent-claim
size: 100Gi
deleteClaim: false
resources:
requests:
memory: 2Gi
cpu: "1"
limits:
memory: 4Gi
cpu: "2"
+44
View File
@@ -0,0 +1,44 @@
apiVersion: kafka.strimzi.io/v1
kind: Kafka
metadata:
name: my-cluster
namespace: kafka
annotations:
strimzi.io/kraft: enabled
strimzi.io/node-pools: enabled
spec:
kafka:
version: 4.0.1
metadataVersion: 4.0-IV0
listeners:
- name: plain
port: 9092
type: internal
tls: false
- name: tls
port: 9093
type: internal
tls: true
config:
offsets.topic.replication.factor: 3
transaction.state.log.replication.factor: 3
transaction.state.log.min.isr: 2
default.replication.factor: 3
min.insync.replicas: 2
entityOperator:
topicOperator:
resources:
requests:
memory: 512Mi
cpu: "0.2"
limits:
memory: 512Mi
cpu: "0.5"
userOperator:
resources:
requests:
memory: 512Mi
cpu: "0.2"
limits:
memory: 512Mi
cpu: "0.5"
+13
View File
@@ -0,0 +1,13 @@
apiVersion: kafka.strimzi.io/v1
kind: KafkaTopic
metadata:
name: email-task
namespace: kafka
labels:
strimzi.io/cluster: my-cluster
spec:
partitions: 3
replicas: 3
config:
retention.ms: 604800000
segment.bytes: 1073741824
+43 -2
View File
@@ -11,7 +11,11 @@ metadata:
rules: rules:
- apiGroups: [""] - apiGroups: [""]
resources: resources:
- nodes
- pods - pods
- pods/log
- services
- endpoints
- namespaces - namespaces
verbs: ["get", "list", "watch"] verbs: ["get", "list", "watch"]
--- ---
@@ -50,6 +54,14 @@ data:
kubernetes_sd_configs: kubernetes_sd_configs:
- role: pod - role: pod
relabel_configs: relabel_configs:
- action: replace
source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]
target_label: app
regex: (.+)
- action: replace
source_labels: [__meta_kubernetes_pod_label_app]
target_label: app
regex: (.+)
- action: replace - action: replace
source_labels: [__meta_kubernetes_pod_node_name] source_labels: [__meta_kubernetes_pod_node_name]
target_label: node target_label: node
@@ -63,9 +75,29 @@ data:
source_labels: [__meta_kubernetes_pod_container_name] source_labels: [__meta_kubernetes_pod_container_name]
target_label: container target_label: container
- action: replace - action: replace
source_labels: [__meta_kubernetes_pod_uid] source_labels: [__meta_kubernetes_pod_uid, __meta_kubernetes_pod_container_name]
separator: /
target_label: __path__ target_label: __path__
replacement: /var/log/pods/*$1/*/*.log replacement: /var/log/pods/*$1/*.log
- job_name: kubernetes-pods-static
pipeline_stages:
- regex:
source: filename
expression: /var/log/pods/(?P<namespace>[^_]+)_(?P<pod>[^_]+)_[^/]+/(?P<container>[^/]+)/[0-9]+\.log
- regex:
source: pod
expression: ^(?P<app>.+?)(?:-[a-f0-9]{8,10}-[a-z0-9]{5}|-[0-9]+)?$
- labels:
namespace:
pod:
container:
app:
static_configs:
- targets:
- localhost
labels:
job: kubernetes-pods
__path__: /var/log/pods/*/*/*.log
--- ---
apiVersion: apps/v1 apiVersion: apps/v1
kind: DaemonSet kind: DaemonSet
@@ -87,6 +119,9 @@ spec:
containers: containers:
- name: promtail - name: promtail
image: grafana/promtail:2.9.6 image: grafana/promtail:2.9.6
securityContext:
runAsUser: 0
runAsGroup: 0
args: args:
- "-config.file=/etc/promtail/promtail.yaml" - "-config.file=/etc/promtail/promtail.yaml"
volumeMounts: volumeMounts:
@@ -97,6 +132,9 @@ spec:
- name: varlog - name: varlog
mountPath: /var/log mountPath: /var/log
readOnly: true readOnly: true
- name: dockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
volumes: volumes:
- name: config - name: config
configMap: configMap:
@@ -106,3 +144,6 @@ spec:
- name: varlog - name: varlog
hostPath: hostPath:
path: /var/log path: /var/log
- name: dockercontainers
hostPath:
path: /var/lib/docker/containers
+119
View File
@@ -0,0 +1,119 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: email-api
namespace: juwan
labels:
app: email-api
spec:
replicas: 3
revisionHistoryLimit: 5
selector:
matchLabels:
app: email-api
template:
metadata:
labels:
app: email-api
spec:
serviceAccountName: find-endpoints
containers:
- name: email-api
image: email
ports:
- containerPort: 8888
env:
- name: KAFKA_BROKER
value: "my-cluster-kafka-bootstrap.kafka.svc.cluster.local:9092"
- name: REDIS_M_HOST
value: "user-redis-master.juwan:6379"
- name: REDIS_S_HOST
value: "user-redis-replica.juwan:6379"
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: user-redis
key: password
readinessProbe:
tcpSocket:
port: 8888
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
tcpSocket:
port: 8888
initialDelaySeconds: 15
periodSeconds: 20
resources:
requests:
cpu: 500m
memory: 512Mi
limits:
cpu: 1000m
memory: 1024Mi
volumeMounts:
- name: timezone
mountPath: /etc/localtime
volumes:
- name: timezone
hostPath:
path: /usr/share/zoneinfo/Asia/Shanghai
---
apiVersion: v1
kind: Service
metadata:
name: email-api-svc
namespace: juwan
spec:
ports:
- port: 8888
targetPort: 8888
selector:
app: email-api
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: email-api-hpa-c
namespace: juwan
labels:
app: email-api-hpa-c
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: email-api
minReplicas: 3
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: email-api-hpa-m
namespace: juwan
labels:
app: email-api-hpa-m
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: email-api
minReplicas: 3
maxReplicas: 10
metrics:
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: 80
+100
View File
@@ -0,0 +1,100 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: email-consumer
namespace: juwan
labels:
app: email-consumer
spec:
replicas: 3
revisionHistoryLimit: 5
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
selector:
matchLabels:
app: email-consumer
template:
metadata:
labels:
app: email-consumer
spec:
serviceAccountName: find-endpoints
containers:
- name: email-consumer
image: 103.236.53.208:4418/library/email-consumer@sha256:6fe8a3a57310a5e79feecc4bf38ac2c5b8c58a7f200f104f7bf4707b9db5fc13
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
volumeMounts:
- name: timezone
mountPath: /etc/localtime
volumes:
- name: timezone
hostPath:
path: /usr/share/zoneinfo/Asia/Shanghai
---
apiVersion: v1
kind: Service
metadata:
name: email-consumer-svc
namespace: juwan
spec:
ports:
- port: 8080
targetPort: 8080
selector:
app: email-consumer
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: email-consumer-hpa-c
namespace: juwan
labels:
app: email-consumer-hpa-c
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: email-consumer
minReplicas: 1
maxReplicas: 3
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: email-consumer-hpa-m
namespace: juwan
labels:
app: email-consumer-hpa-m
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: email-consumer
minReplicas: 1
maxReplicas: 3
metrics:
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: 80
+107
View File
@@ -0,0 +1,107 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: snowflake
namespace: juwan
labels:
app: snowflake
spec:
replicas: 3
revisionHistoryLimit: 5
selector:
matchLabels:
app: snowflake
template:
metadata:
labels:
app: snowflake
spec:
serviceAccountName: find-endpoints
containers:
- name: snowflake
image: 103.236.53.208:4418/library/snowflake@sha256:1679cf94b69f426eec5d2f960ffb153bb7dbcd3bcaf0286261a43756384a86b3
ports:
- containerPort: 8080
readinessProbe:
tcpSocket:
port: 8080
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
tcpSocket:
port: 8080
initialDelaySeconds: 15
periodSeconds: 20
resources:
requests:
cpu: 500m
memory: 512Mi
limits:
cpu: 1000m
memory: 1024Mi
volumeMounts:
- name: timezone
mountPath: /etc/localtime
volumes:
- name: timezone
hostPath:
path: /usr/share/zoneinfo/Asia/Shanghai
---
apiVersion: v1
kind: Service
metadata:
name: snowflake-svc
namespace: juwan
spec:
ports:
- port: 8080
targetPort: 8080
selector:
app: snowflake
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: snowflake-hpa-c
namespace: juwan
labels:
app: snowflake-hpa-c
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: snowflake
minReplicas: 3
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: snowflake-hpa-m
namespace: juwan
labels:
app: snowflake-hpa-m
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: snowflake
minReplicas: 3
maxReplicas: 10
metrics:
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: 80
+65 -11
View File
@@ -29,18 +29,35 @@ spec:
] ]
containers: containers:
- name: user-rpc - name: user-rpc
image: user-rpc:v1 image: 103.236.53.208:4418/library/user-rpc@sha256:57746256905acb5757153aef536ebfd19338b7f935f01ba1f538fbfd0a12f6f5
ports: ports:
- containerPort: 9001 - containerPort: 9001
- containerPort: 4001 - containerPort: 4001
env: env:
- name: DB_URI - name: DB_PORT
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
name: user-db-app name: user-db-app
key: uri key: port
- name: REDIS_HOST - name: DB_PASSWORD
value: "user-redis.juwan:6379" valueFrom:
secretKeyRef:
name: user-db-app
key: password
            - name: DB_USERNAME
valueFrom:
secretKeyRef:
name: user-db-app
key: username
- name: DB_NAME
valueFrom:
secretKeyRef:
name: user-db-app
key: dbname
- name: REDIS_M_HOST
value: "user-redis-master.juwan:6379"
- name: REDIS_S_HOST
value: "user-redis-replica.juwan:6379"
- name: REDIS_PASSWORD - name: REDIS_PASSWORD
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
@@ -143,9 +160,9 @@ spec:
type: Utilization type: Utilization
averageUtilization: 80 averageUtilization: 80
--- ---
# Redis Cluster # Redis 主从复制
apiVersion: redis.redis.opstreelabs.in/v1beta2 apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisCluster kind: RedisReplication
metadata: metadata:
name: user-redis name: user-redis
namespace: juwan namespace: juwan
@@ -161,9 +178,10 @@ spec:
limits: limits:
cpu: 500m cpu: 500m
memory: 512Mi memory: 512Mi
redisSecret: redisSecret:
name: user-redis name: user-redis
key: password key: password
redisExporter: redisExporter:
enabled: true enabled: true
image: quay.io/opstree/redis-exporter:latest image: quay.io/opstree/redis-exporter:latest
@@ -172,7 +190,43 @@ spec:
runAsUser: 1000 runAsUser: 1000
fsGroup: 1000 fsGroup: 1000
storage: storage:
size: 1Gi volumeClaimTemplate:
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
---
# Sentinel 监控
apiVersion: redis.redis.opstreelabs.in/v1beta2
kind: RedisSentinel
metadata:
name: user-redis-sentinel
namespace: juwan
spec:
clusterSize: 3
kubernetesConfig:
image: quay.io/opstree/redis-sentinel:v7.0.12
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 500m
memory: 512Mi
podSecurityContext:
runAsUser: 1000
fsGroup: 1000
redisSentinelConfig:
redisReplicationName: user-redis
masterGroupName: mymaster
redisPort: "6379"
quorum: "2"
downAfterMilliseconds: "5000"
failoverTimeout: "10000"
parallelSyncs: "1"
--- ---
# PostgreSQL 集群 # PostgreSQL 集群
+8
View File
@@ -1,3 +1,11 @@
kubectl create secret generic user-redis \ kubectl create secret generic user-redis \
--from-literal=password=$(openssl rand -base64 12) \ --from-literal=password=$(openssl rand -base64 12) \
--namespace juwan --namespace juwan
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm upgrade --install -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/main/docs/src/samples/monitoring/kube-stack-config.yaml prometheus-community prometheus-community/kube-prometheus-stack
kubectl apply -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.22/releases/cnpg-1.22.0.yaml
helm repo add ot-helm https://ot-container-kit.github.io/helm-charts
helm install redis-operator ot-helm/redis-operator
kubectl create namespace kafka
kubectl create -f 'https://strimzi.io/install/latest?namespace=kafka' -n kafka
+34
View File
@@ -0,0 +1,34 @@
syntax = "v1"
info (
author: "Asadz"
date: "2024-06-19"
version: "1.0"
)
type (
SendVerificationCodeReq {
Email string `json:"email" binding:"required,email"`
Scene string `json:"scene" binding:"required,oneof=register login reset_password bind_email"`
}
SendVerificationCodeResp {
RequestId string `json:"requestId"`
ExpireInSec int64 `json:"expireInSec"`
Message string `json:"message"`
}
)
@server (
group: email
prefix: /api/email
middleware: Logger
)
service email-api {
@doc (
summary: "发送邮箱验证码"
description: "向用户邮箱发送验证码,支持注册、登录、重置密码、绑定邮箱等场景"
)
@handler SendVerificationCode
post /verification-code/send (SendVerificationCodeReq) returns (SendVerificationCodeResp)
}
+92
View File
@@ -0,0 +1,92 @@
// Protocol definitions for the email service: verification codes,
// one-off mail, batch mail, and template mail.
syntax = "proto3";
package email;
option go_package = "./email";
// EmailService sends transactional and bulk email on behalf of other services.
service EmailService {
// Send a verification-code email.
rpc SendVerificationCode(SendVerificationCodeReq) returns (SendVerificationCodeResp);
// Send a generic (free-form) email.
rpc SendEmail(SendEmailReq) returns (SendEmailResp);
// Send the same email to a list of recipients.
rpc SendBatchEmail(SendBatchEmailReq) returns (SendBatchEmailResp);
// Send an email rendered from a named template.
rpc SendTemplateEmail(SendTemplateEmailReq) returns (SendTemplateEmailResp);
}
// Verification-code request.
message SendVerificationCodeReq {
string to = 1; // recipient email address
string code = 2; // the verification code to deliver
string type = 3; // scenario: register, reset, login
string language = 4; // locale of the email body: zh-CN, en-US
int32 expire_minutes = 5; // code validity window, in minutes
}
// Verification-code response.
message SendVerificationCodeResp {
int32 code = 1; // business status code
string message = 2; // human-readable status message
string message_id = 3; // mail ID, used for tracing/auditing
}
// Generic email request.
message SendEmailReq {
string to = 1; // recipient
string subject = 2; // subject line
string body = 3; // message body (HTML)
string cc = 4; // carbon copy
string bcc = 5; // blind carbon copy
repeated Attachment attachments = 6; // file attachments
EmailPriority priority = 7; // delivery priority
}
// Generic email response.
message SendEmailResp {
int32 code = 1; // business status code
string message = 2; // human-readable status message
string message_id = 3; // mail ID for tracing
}
// Batch email request: one subject/body sent to many recipients.
message SendBatchEmailReq {
repeated string to_list = 1; // recipient list
string subject = 2; // subject line
string body = 3; // message body (HTML)
EmailPriority priority = 4; // delivery priority
}
// Batch email response with per-recipient failure detail.
message SendBatchEmailResp {
int32 code = 1; // business status code
string message = 2; // human-readable status message
int32 success_count = 3; // number of emails accepted for delivery
int32 failed_count = 4; // number of emails that failed
repeated FailedEmail failed_list = 5; // details for each failed recipient
}
// Template email request: template name plus substitution parameters.
message SendTemplateEmailReq {
string to = 1; // recipient
string template_name = 2; // name of the registered template
map<string, string> params = 3; // template substitution parameters
string language = 4; // locale used to pick the template variant
}
// Template email response.
message SendTemplateEmailResp {
int32 code = 1; // business status code
string message = 2; // human-readable status message
string message_id = 3; // mail ID for tracing
}
// A single file attachment.
message Attachment {
string filename = 1; // display name of the file
bytes content = 2; // raw file bytes
string content_type = 3; // MIME type, e.g. application/pdf
}
// Delivery priority. NORMAL is the proto3 default (value 0).
enum EmailPriority {
NORMAL = 0;
HIGH = 1;
LOW = 2;
}
// One failed recipient in a batch send, with the failure reason.
message FailedEmail {
string email = 1; // recipient address that failed
string reason = 2; // failure description
}
+23
View File
@@ -0,0 +1,23 @@
// Protocol definitions for the snowflake ID service (presumably
// snowflake-style unique int64 IDs, per the service name — confirm
// against the server implementation).
syntax = "proto3";
package snowflake;
option go_package = "./snowflake";
// SnowflakeService hands out unique int64 IDs, singly or in batches.
service SnowflakeService {
// NextId generates and returns one new ID.
rpc NextId(NextIdReq) returns (NextIdResp);
// NextIds generates and returns a batch of `count` new IDs.
rpc NextIds(NextIdsReq) returns (NextIdsResp);
}
// NextIdReq is intentionally empty: a single-ID request needs no parameters.
message NextIdReq {}
// NextIdResp carries the single generated ID.
message NextIdResp {
int64 id = 1;
}
// NextIdsReq asks for a batch of IDs.
message NextIdsReq {
int32 count = 1; // number of IDs to generate
}
// NextIdsResp carries the generated batch.
message NextIdsResp {
repeated int64 ids = 1;
}
+266
View File
@@ -0,0 +1,266 @@
# Email Consumer Kafka 投递与日志验证实验手册
## 1. 实验目标
验证 `email-task` consumer 是否能正常消费 Kafka 消息,并在日志中打印消费内容。
本实验同时给出两种验证方式:
1. `kubectl logs` 直接查看 Pod 日志
2. Grafana + Loki 查看聚合日志
---
## 2. 实验前提
### 2.1 需要满足的运行状态
```bash
kubectl -n juwan get pods -l app=email-task
kubectl -n kafka get pods
kubectl -n monitoring get pods
```
预期:
- `email-task` 至少 1 个 Pod 为 `Running`
- Kafka 集群有可用 broker(如 `my-cluster-kafka-pool-0`)
- `loki/promtail/grafana``Running`(若需要 Loki 验证)
### 2.2 本次实验使用的关键配置
来自 `app/email/mq/etc/email.yaml`
- Broker: `my-cluster-kafka-bootstrap.kafka.svc.cluster.local:9092`
- Topic: `email-task`
- Group: `email-consumer-group`
---
## 3. 实验步骤(详细)
## 步骤 1:确认 Topic 存在
### 目的
避免消息投递到不存在的 Topic,导致消费端无数据。
### 指令
```bash
kubectl -n kafka exec my-cluster-kafka-pool-0 -- \
/opt/kafka/bin/kafka-topics.sh \
--bootstrap-server my-cluster-kafka-bootstrap:9092 \
--list
```
### 预期结果
输出中包含:
- `email-task`
---
## 步骤 2:投递一条最小测试消息(纯文本)
### 目的
先验证链路通路(producer -> kafka -> consumer)是否正常,不引入 JSON 转义复杂度。
### 指令
```bash
kubectl -n kafka exec my-cluster-kafka-pool-0 -- /bin/bash -lc \
"printf 'test-email-message\\n' | \
/opt/kafka/bin/kafka-console-producer.sh \
--bootstrap-server my-cluster-kafka-bootstrap:9092 \
--topic email-task"
```
### 预期结果
命令正常返回(通常无额外输出)。
---
## 步骤 3:查看 consumer 日志(kubectl 直查)
### 目的
确认 consumer 实际收到消息并执行日志打印。
### 指令(回看最近日志)
```bash
kubectl -n juwan logs -l app=email-task --tail=120
```
### 指令(实时追踪)
```bash
kubectl -n juwan logs -l app=email-task -f --since=10m
```
### 预期日志示例
```text
Consume get message key: , value: test-email-message
```
说明:
- key 为空是正常的(本次 producer 未设置 key)
- value 为投递内容,说明消费链路正常
---
## 步骤 4:投递业务消息(验证码 JSON)
### 目的
模拟真实业务 payload,验证 consumer 对业务消息格式的处理。
### 指令
```bash
kubectl -n kafka exec my-cluster-kafka-pool-0 -- /bin/bash -lc "cat <<'EOF' | \
/opt/kafka/bin/kafka-console-producer.sh \
--bootstrap-server my-cluster-kafka-bootstrap:9092 \
--topic email-task
{\"type\":\"verification_code\",\"email\":\"test@example.com\",\"code\":\"123456\",\"scene\":\"login\",\"expired_minutes\":5}
EOF"
```
### 预期结果
- producer 正常返回
- `email-task` 日志可看到包含 JSON 的消费日志
---
## 步骤 5:投递业务消息(活动通知 JSON)
### 目的
验证另一类业务消息(活动通知)通路。
### 指令
```bash
kubectl -n kafka exec my-cluster-kafka-pool-0 -- /bin/bash -lc "cat <<'EOF' | \
/opt/kafka/bin/kafka-console-producer.sh \
--bootstrap-server my-cluster-kafka-bootstrap:9092 \
--topic email-task
{\"type\":\"activity_notice\",\"email\":\"test@example.com\",\"title\":\"春季活动\",\"content\":\"满100减20\",\"activity_id\":\"A20260225\"}
EOF"
```
### 预期结果
- producer 正常返回
- consumer 日志出现活动消息内容
---
## 步骤 6:使用 Loki/Grafana 验证(可选)
### 目的
确认日志采集链路(Promtail -> Loki -> Grafana)正常,便于后续线上排查。
### 6.1 打开 Grafana
```bash
kubectl port-forward -n monitoring svc/grafana 3000:3000
```
浏览器:`http://localhost:3000`
### 6.2 在 Explore 中查询
使用 Loki 数据源,输入:
```logql
{job="kubernetes-pods", namespace="juwan", app="email-task"} |= "Consume get message"
```
若没有结果:
1. 把时间范围调大到 `Last 6 hours`/`Last 24 hours`
2. 放宽查询条件:
```logql
{job="kubernetes-pods", namespace="juwan", pod=~"email-task-.*"}
```
---
## 4. 一键复现实验命令(顺序执行)
```bash
# 1) 查看 topic
kubectl -n kafka exec my-cluster-kafka-pool-0 -- /opt/kafka/bin/kafka-topics.sh --bootstrap-server my-cluster-kafka-bootstrap:9092 --list
# 2) 发测试消息
kubectl -n kafka exec my-cluster-kafka-pool-0 -- /bin/bash -lc "printf 'test-email-message\\n' | /opt/kafka/bin/kafka-console-producer.sh --bootstrap-server my-cluster-kafka-bootstrap:9092 --topic email-task"
# 3) 看 consumer 日志
kubectl -n juwan logs -l app=email-task --tail=120
```
---
## 5. 常见问题与处理
### 问题 1:发消息命令报引号/EOF错误
现象:`unexpected EOF while looking for matching`
原因:Shell 引号转义不正确。
处理:
- 先用纯文本消息验证链路
- JSON 使用 here-doc`cat <<'EOF'`)方式,避免转义混乱
### 问题 2:发了消息但 consumer 无日志
排查顺序:
1. `email-task` 是否 Running
2. Topic 是否正确(`email-task`)
3. consumer group 是否一致(`email-consumer-group`)
4. 查看 Pod 实时日志(`-f`)
5. 若只看 Loki,请放大时间窗口并放宽标签条件
### 问题 3Loki 查不到但 kubectl logs 能看到
说明业务正常,问题在日志采集查询链路:
- 检查 Promtail target 是否 ready
- 检查 Loki 查询标签/时间范围
- 参考 `docs/loki-log-troubleshooting.md`
---
## 6. 实验结论判定标准
满足以下任一即可判定消费链路可用:
1. `kubectl logs` 出现:`Consume get message ...`
2. Grafana Loki 查询出现对应消费日志
若两者都出现,说明:
- Kafka 投递正常
- Consumer 消费正常
- 日志采集与检索链路正常
---
## 7. 关联文档
- Loki 使用:`docs/loki-usage-guide.md`
- Loki 排错:`docs/loki-log-troubleshooting.md`
- Email 部署排错:`docs/email-task-deployment-troubleshooting.md`
@@ -0,0 +1,147 @@
# Email Task 部署故障排查与修复记录
## 1. 问题现象
部署 `email-task` 时出现调度失败:
```text
Warning FailedScheduling 0/1 nodes are available: 1 Insufficient memory.
no new claims to deallocate, preemption: 0/1 nodes are available:
1 No preemption victims found for incoming pod.
```
表现为:
- `Deployment` 期望副本无法全部就绪
- `Pod` 长时间 `Pending`
---
## 2. 排查思路
按以下顺序排查:
1. **看部署配置是否过高请求**`requests/limits` + `replicas`
2. **看节点可分配资源和已分配资源**(确认是否真的是内存不足)
3. **看滚动策略是否会额外拉起新 Pod**`maxSurge` 可能放大内存压力)
4. **看容器健康检查是否匹配服务类型**(任务型服务不一定监听端口)
---
## 3. 关键排查命令
### 3.1 查看节点可分配资源
```powershell
kubectl get nodes -o custom-columns=NAME:.metadata.name,ALLOCATABLE_CPU:.status.allocatable.cpu,ALLOCATABLE_MEM:.status.allocatable.memory
```
### 3.2 查看部署与 Pod 状态
```powershell
kubectl -n juwan get deploy email-task -o wide
kubectl -n juwan get pods -l app=email-task -o wide
kubectl -n juwan describe pod -l app=email-task
```
### 3.3 查看节点资源分配占比
```powershell
kubectl describe node minikube
```
关注输出中的 `Allocated resources`
- `memory requests` 已接近节点上限(本次约 97%)
### 3.4 查看部署策略与探针配置
```powershell
kubectl -n juwan get deploy email-task -o yaml
kubectl -n juwan logs deploy/email-task --tail=120
```
---
## 4. 根因分析
本次是**组合问题**
1. **内存请求过高 + 副本过多**
- 原始配置:`replicas=3`
- 每个 Pod 请求 `memory=512Mi`
- 单节点场景下,叠加现有业务后无法继续调度
2. **滚动更新默认 `maxSurge=25%`**
- 更新时可能额外起新 Pod,进一步触发内存不足
3. **探针不匹配服务行为**
- 原配置为 `tcpSocket:8080` 探针
- 实际 `email-task` 是任务型服务,日志显示启动后并未提供该端口服务
- 导致 `Readiness/Liveness` 持续失败
---
## 5. 修复方案
仅修改文件:
- `deploy/k8s/service/email/email.yaml`
### 5.1 降低资源请求与副本基线
- `replicas: 3 -> 1`
- `requests.cpu: 500m -> 100m`
- `requests.memory: 512Mi -> 128Mi`
- `limits.cpu: 1000m -> 500m`
- `limits.memory: 1024Mi -> 512Mi`
### 5.2 调整 HPA 基线与上限
- 两个 HPACPU / Memory)统一:
- `minReplicas: 3 -> 1`
- `maxReplicas: 10 -> 3`
### 5.3 调整滚动发布策略
- `strategy.rollingUpdate.maxSurge: 0`
- `strategy.rollingUpdate.maxUnavailable: 1`
目的:避免滚动期间额外拉起 Pod 造成瞬时内存不足。
### 5.4 移除不适配的 8080 TCP 探针
移除:
- `readinessProbe.tcpSocket:8080`
- `livenessProbe.tcpSocket:8080`
---
## 6. 修复执行命令
```powershell
kubectl apply -f deploy/k8s/service/email/email.yaml
kubectl -n juwan rollout restart deploy/email-task
kubectl -n juwan rollout status deploy/email-task --timeout=180s
kubectl -n juwan get pods -l app=email-task -o wide
kubectl -n juwan describe pods -l app=email-task | Select-String -Pattern 'FailedScheduling|Unhealthy|Warning|Events|Node:'
```
---
## 7. 修复结果
- `Deployment` 滚动成功
- 新 Pod 成功调度并 `Running`
- 无新的 `FailedScheduling``Unhealthy` 事件
---
## 8. 后续建议
1. 若要恢复多副本,先按节点容量逐步上调(建议先 2 副本并观测)。
2. 为任务型服务设计更合适的健康检查方式:
- 可考虑 `exec` 探针或业务自检端点。
3. 在单节点开发环境中统一降低默认 `requests`,防止多个服务叠加后调度失败。
4. 如需高可用,建议扩容节点而不是仅依赖压缩资源。
+216
View File
@@ -0,0 +1,216 @@
# Loki 无日志排查与修复手册
## 背景
现象:Grafana Explore 使用 Loki 数据源查询 `{job="kubernetes-pods"}` 时无结果,页面提示 `No logs found`
影响:日志链路不可用,无法按服务排查线上问题。
链路目标:
- Promtail 采集 Kubernetes Pod 日志
- Loki 存储与检索日志
- Grafana 查询展示日志
---
## 一、排查思路
本次按“组件健康 -> 采集发现 -> 文件可读 -> 入库验证 -> 查询验证”的顺序排查。
### 1) 确认监控组件健康
先确认 Promtail/Loki/Grafana 是否都在 Running,避免在异常状态下排查配置。
### 2) 判断是“没采到”还是“没查到”
通过 Promtail `/targets``/service-discovery` 页面确认是否存在 active target。
-`0/0``0/1 unready`,说明采集端有问题
- 若 target 正常但 Loki 无数据,再看推送或查询标签
### 3) 检查 Promtail 能否访问节点日志文件
重点确认:
- `/var/log/pods` 是否可见
- 采集路径是否匹配
- 是否存在大量 `stat ... no such file or directory`
### 4) 直连 Loki API 验证是否入库
绕过 Grafana,直接访问 Loki API。
- 若 API 无数据,问题在 Promtail 采集/推送链路
- 若 API 有数据,问题在 Grafana 查询条件、时间范围或标签
---
## 二、根因分析
本次属于“Promtail 侧采集链路不完整”,主要问题如下:
1. Kubernetes SD 目标未生效(Promtail targets 显示 `kubernetes-pods (0/0)`)。
2. 即使加入静态采集,目标一度 `0/1 unready`
3. `/var/log/pods` 下日志多为符号链接,真实目标在 `/var/lib/docker/containers`
4. Promtail 容器未挂载 `/var/lib/docker/containers`,导致大量 `stat ... no such file or directory`
5. 标签维度不足,不便于按业务服务名筛选日志。
---
## 三、修复思路
### 1) 强化 Promtail 权限与发现能力
`deploy/k8s/monitoring/promtail.yaml` 中:
- 补充 RBAC 资源权限:
- `nodes`
- `pods`
- `pods/log`
- `services`
- `endpoints`
- `namespaces`
### 2) 增加静态采集兜底
`scrape_configs` 中新增 `kubernetes-pods-static`,路径:
- `/var/log/pods/*/*/*.log`
用于在 Kubernetes SD 临时失效时仍能采集日志。
### 3) 修复宿主机日志访问链路
Promtail DaemonSet 增加:
- `securityContext.runAsUser: 0`
- `securityContext.runAsGroup: 0`
- 挂载 `hostPath: /var/lib/docker/containers`
并挂载到容器内同路径只读。
### 4) 完善标签体系,支持按服务筛选
新增/保留标签:
- `namespace`
- `pod`
- `container`
- `app`
静态采集通过 `pipeline_stages``filename` 解析标签,并从 `pod` 生成 `app`(去除滚动后缀)。
---
## 四、关键变更文件
- `deploy/k8s/monitoring/promtail.yaml`
本次 Loki 主配置与 Grafana 数据源无需改动,核心修复集中在 Promtail 采集侧。
---
## 五、排查与修复命令清单
> 以下命令均在项目根目录执行。
### 1) 组件状态检查
```powershell
kubectl get pods -n monitoring -o wide
kubectl get svc -n monitoring
kubectl logs -n monitoring -l app=promtail --tail=120
kubectl logs -n monitoring -l app=loki --tail=80
```
### 2) Promtail 文件系统与配置检查
```powershell
$pod=(kubectl get pod -n monitoring -l app=promtail -o jsonpath='{.items[0].metadata.name}')
kubectl exec -n monitoring $pod -- sh -c "ls -ld /var/log /var/log/pods /var/log/containers"
kubectl exec -n monitoring $pod -- sh -c "find /var/log/pods -name '*.log' | head -n 20"
kubectl exec -n monitoring $pod -- sh -c "cat /etc/promtail/promtail.yaml"
kubectl exec -n monitoring $pod -- sh -c "cat /run/promtail/positions.yaml | head -n 120"
```
### 3) Promtail Web 诊断页(targets / service-discovery
```powershell
$pod=(kubectl get pod -n monitoring -l app=promtail -o jsonpath='{.items[0].metadata.name}')
$job=Start-Job -ScriptBlock { param($p) kubectl port-forward -n monitoring pod/$p 19080:9080 } -ArgumentList $pod
Start-Sleep -Seconds 3
Invoke-WebRequest -UseBasicParsing http://127.0.0.1:19080/targets | Select-Object -ExpandProperty Content
Invoke-WebRequest -UseBasicParsing http://127.0.0.1:19080/service-discovery | Select-Object -ExpandProperty Content
Stop-Job $job -ErrorAction SilentlyContinue
Remove-Job $job -Force -ErrorAction SilentlyContinue
```
### 4) RBAC 实测
```powershell
kubectl auth can-i list pods --as=system:serviceaccount:monitoring:promtail --all-namespaces
kubectl auth can-i watch pods --as=system:serviceaccount:monitoring:promtail --all-namespaces
kubectl auth can-i list namespaces --as=system:serviceaccount:monitoring:promtail
kubectl auth can-i get nodes --as=system:serviceaccount:monitoring:promtail
```
### 5) 应用修复并滚动重启 Promtail
```powershell
kubectl apply -f deploy/k8s/monitoring/promtail.yaml
kubectl rollout restart ds/promtail -n monitoring
kubectl rollout status ds/promtail -n monitoring --timeout=120s
kubectl logs -n monitoring -l app=promtail --tail=120
```
### 6) Loki API 直连验证
```powershell
$job=Start-Job -ScriptBlock { kubectl port-forward -n monitoring svc/loki 13100:3100 }
Start-Sleep -Seconds 3
Invoke-WebRequest -UseBasicParsing "http://127.0.0.1:13100/loki/api/v1/query_range?query=%7Bjob%3D%22kubernetes-pods%22%7D&limit=10" | Select-Object -ExpandProperty Content
Stop-Job $job -ErrorAction SilentlyContinue
Remove-Job $job -Force -ErrorAction SilentlyContinue
```
### 7) 按 app 标签验证
```powershell
$job=Start-Job -ScriptBlock { kubectl port-forward -n monitoring svc/loki 13100:3100 }
Start-Sleep -Seconds 3
Invoke-WebRequest -UseBasicParsing "http://127.0.0.1:13100/loki/api/v1/query_range?query=%7Bjob%3D%22kubernetes-pods%22%2Capp%3D~%22.+%22%7D&limit=5" | Select-Object -ExpandProperty Content
Stop-Job $job -ErrorAction SilentlyContinue
Remove-Job $job -Force -ErrorAction SilentlyContinue
```
---
## 六、Grafana 查询建议
建议先放大时间范围(Last 6 hours / Last 24 hours),再逐步收敛:
```logql
{job="kubernetes-pods"}
{job="kubernetes-pods", namespace="juwan"}
{job="kubernetes-pods", app="user-rpc"}
{job="kubernetes-pods", app=~"user-rpc|snowflake|email-mq"} |= "error"
```
---
## 七、后续优化建议
1. 当前 Loki 使用 `emptyDir`,重建后数据会丢失;生产建议改 PVC 持久化。
2. 可以补充 Promtail 的 `drop` 规则,减少噪音日志(如健康检查日志)。
3. 建议在 Grafana 中预置业务 Dashboard 与告警规则(按 app + error rate)。
---
## 八、结论
本次无日志的核心问题不在 Loki 或 Grafana,而在 Promtail 采集链路:
- 发现目标不稳定 + 日志文件符号链接目标未挂载
完成上述修复后,已可通过 Loki API 查到日志,并支持按 `app` 维度查询。
+174
View File
@@ -0,0 +1,174 @@
# Loki 使用指南(日志查看)
本文说明在当前项目中如何使用 Loki 查看 Kubernetes 日志,包括 Grafana 查询、LogQL 常用语句、命令行验证与常见排错。
---
## 1. 日志链路说明
当前日志链路:
- Promtail 采集节点日志文件
- Loki 存储与检索日志
- Grafana 作为查询与展示入口
相关配置文件:
- `deploy/k8s/monitoring/promtail.yaml`
- `deploy/k8s/monitoring/loki.yaml`
- `deploy/k8s/monitoring/grafana.yaml`
---
## 2. 快速开始(Grafana 查看日志)
### 步骤 1:确认监控组件运行
```bash
kubectl get pods -n monitoring
```
至少应看到 `promtail``loki``grafana``Running`
### 步骤 2:打开 Grafana
```bash
kubectl port-forward -n monitoring svc/grafana 3000:3000
```
浏览器打开:`http://localhost:3000`
默认账号密码(按现有配置):
- 用户名:`admin`
- 密码:`change-me`
### 步骤 3:进入 Explore 查询
- 左侧菜单进入 **Explore**
- 数据源选择 **Loki**
- 时间范围建议先设为 **Last 6 hours****Last 24 hours**
- 输入 LogQL 查询并点击 **Run query**
---
## 3. 常用 LogQL 查询语句
### 3.1 全量日志
```logql
{job="kubernetes-pods"}
```
### 3.2 按命名空间过滤
```logql
{job="kubernetes-pods", namespace="juwan"}
```
### 3.3 按服务(app 标签)过滤
```logql
{job="kubernetes-pods", app="user-rpc"}
```
### 3.4 多服务联合过滤
```logql
{job="kubernetes-pods", app=~"user-rpc|snowflake|email-mq"}
```
### 3.5 按容器名过滤
```logql
{job="kubernetes-pods", container="user-rpc"}
```
### 3.6 关键字过滤(错误日志)
```logql
{job="kubernetes-pods", namespace="juwan"} |= "error"
```
### 3.7 多关键字正则过滤
```logql
{job="kubernetes-pods", namespace="juwan"} |~ "(error|panic|fatal|timeout)"
```
### 3.8 统计最近 5 分钟错误量(按 app)
```logql
sum by (app) (count_over_time({job="kubernetes-pods"} |~ "(?i)error|panic|fatal" [5m]))
```
---
## 4. 不经过 Grafana 的直连验证(Loki API
用于区分“Grafana 查询问题”与“日志未入库问题”。
### 4.1 端口转发 Loki
```bash
kubectl port-forward -n monitoring svc/loki 3100:3100
```
### 4.2 查询是否有流数据
```bash
curl "http://127.0.0.1:3100/loki/api/v1/query_range?query={job=\"kubernetes-pods\"}&limit=10"
```
### 4.3 查询 app 标签流
```bash
curl "http://127.0.0.1:3100/loki/api/v1/query_range?query={job=\"kubernetes-pods\",app=~\".+\"}&limit=10"
```
如果 API 返回 `result` 非空,说明 Loki 已正常入库。
---
## 5. 常见问题与处理
### 问题 1Grafana 显示 No logs found
建议按顺序检查:
1. 时间范围是否太短(先调大到 6h/24h)
2. 查询标签是否过窄(先用 `{job="kubernetes-pods"}`)
3. Promtail 是否正常运行并有 target
4. Loki API 是否能直接查到数据
### 问题 2Promtail 有 Running 但仍无日志
重点检查:
- `promtail` targets 是否 `ready`
- 是否存在 `stat ... no such file or directory`
- 是否挂载日志目录(`/var/log`、`/var/lib/docker/containers`)
- 是否有足够 RBAC 权限(pods/nodes/namespaces 等)
### 问题 3:查不到某个服务日志
建议检查:
- 该服务 pod 是否在运行并产生日志
- `namespace``app` 过滤条件是否正确
- 先用 `namespace` 过滤,再逐步加 `app``container` 条件
---
## 6. 推荐查询习惯
1. 先粗后细:全量 -> namespace -> app -> container -> 关键字
2. 先看时间范围:避免默认 1h 漏查
3. 遇到空结果先用 Loki API 验证入库
4. 保存常用查询到 Grafana Dashboard,便于团队复用
---
## 7. 参考
- Loki 故障排查文档:`docs/loki-log-troubleshooting.md`
+7 -3
View File
@@ -3,7 +3,11 @@ module juwan-backend
go 1.25.1 go 1.25.1
require ( require (
github.com/golang-jwt/jwt/v4 v4.5.2
github.com/google/uuid v1.6.0
github.com/redis/go-redis/v9 v9.17.3
github.com/zeromicro/go-zero v1.10.0 github.com/zeromicro/go-zero v1.10.0
golang.org/x/crypto v0.46.0
google.golang.org/grpc v1.79.1 google.golang.org/grpc v1.79.1
google.golang.org/protobuf v1.36.11 google.golang.org/protobuf v1.36.11
) )
@@ -26,12 +30,10 @@ require (
github.com/go-openapi/swag v0.22.4 // indirect github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-sql-driver/mysql v1.9.3 // indirect github.com/go-sql-driver/mysql v1.9.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grafana/pyroscope-go v1.2.7 // indirect github.com/grafana/pyroscope-go v1.2.7 // indirect
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
@@ -46,12 +48,14 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/openzipkin/zipkin-go v0.4.3 // indirect github.com/openzipkin/zipkin-go v0.4.3 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/prometheus/client_golang v1.23.2 // indirect github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.66.1 // indirect github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.16.1 // indirect github.com/prometheus/procfs v0.16.1 // indirect
github.com/redis/go-redis/v9 v9.17.3 // indirect github.com/segmentio/kafka-go v0.4.47 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/zeromicro/go-queue v1.2.2 // indirect
go.etcd.io/etcd/api/v3 v3.5.15 // indirect go.etcd.io/etcd/api/v3 v3.5.15 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.15 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.15 // indirect
go.etcd.io/etcd/client/v3 v3.5.15 // indirect go.etcd.io/etcd/client/v3 v3.5.15 // indirect
+44
View File
@@ -80,6 +80,7 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -113,6 +114,9 @@ github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7s
github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c= github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -131,6 +135,8 @@ github.com/redis/go-redis/v9 v9.17.3 h1:fN29NdNrE17KttK5Ndf20buqfDZwGNgoUr9qjl1D
github.com/redis/go-redis/v9 v9.17.3/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= github.com/redis/go-redis/v9 v9.17.3/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0=
github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -146,10 +152,16 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
github.com/zeromicro/go-queue v1.2.2 h1:3TMhRlI/8lZy13Sj6FBBWWRXlsQhGCchRxY2itfV1Is=
github.com/zeromicro/go-queue v1.2.2/go.mod h1:5HiNTEw1tACi9itho0JYQ1+EpIGpSFM4tOQ4bit+yKM=
github.com/zeromicro/go-zero v1.10.0 h1:+qfAqj+BGt0qjW1PQk2VO5WLwIQBh60CA3OTLsBosS8= github.com/zeromicro/go-zero v1.10.0 h1:+qfAqj+BGt0qjW1PQk2VO5WLwIQBh60CA3OTLsBosS8=
github.com/zeromicro/go-zero v1.10.0/go.mod h1:qAModWGsfkrBl0JP9oS7K7k6dgucExOuQdpzHyXVKLg= github.com/zeromicro/go-zero v1.10.0/go.mod h1:qAModWGsfkrBl0JP9oS7K7k6dgucExOuQdpzHyXVKLg=
go.etcd.io/etcd/api/v3 v3.5.15 h1:3KpLJir1ZEBrYuV2v+Twaa/e2MdDCEZ/70H+lzEiwsk= go.etcd.io/etcd/api/v3 v3.5.15 h1:3KpLJir1ZEBrYuV2v+Twaa/e2MdDCEZ/70H+lzEiwsk=
@@ -199,12 +211,23 @@ go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
@@ -212,17 +235,36 @@ golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwE
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4= golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4=
@@ -231,6 +273,8 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+1 -1
View File
@@ -1,5 +1,5 @@
{ {
"name": "juwan-backend", "name": "st-1-example",
"lockfileVersion": 3, "lockfileVersion": 3,
"requires": true, "requires": true,
"packages": { "packages": {