// ============================================================
// juwan-backend CD Pipeline — Harbor active-poll mode
// Scenario: the local dev machine has no public IP, so Jenkins
// actively polls Harbor for image updates.
//
// How it works:
//   1. Jenkins periodically (every 2 minutes by default) calls the
//      Harbor Registry API.
//   2. For each image, the latest digest is compared with the digest
//      recorded on the previous run.
//   3. On change, the matching service is redeployed via
//      `kubectl rollout restart`.
//
// State handling: `Detect Changes` only records a *candidate* digest
// (<svc>.digest.candidate). The candidate is promoted to the real
// state file (<svc>.digest) only after a successful rollout, so a
// failed deployment is retried on the next poll instead of being
// silently marked as done.
//
// Prerequisites (see docs/jenkins-cd/01-local-dev-setup.md):
//   - Jenkins credential: harbor-credentials (username/password)
//   - Jenkins credential: kubeconfig-dev (Secret file, k3s kubeconfig)
//   - kubectl installed on the Jenkins node with access to the k3s cluster
// ============================================================
pipeline {
    agent any

    // The 2-minute cron plus rollout waits of up to 3 minutes per service
    // make overlapping runs likely; concurrent builds would race on the
    // digest state files in the workspace, so serialize them.
    options {
        disableConcurrentBuilds()
    }

    // ── Tunable parameters ──────────────────────────────────────
    parameters {
        // Harbor address, without protocol prefix
        string(
            name: 'HARBOR_REGISTRY',
            defaultValue: '103.236.53.208:4418',
            description: 'Harbor 镜像仓库地址(host:port)'
        )
        string(
            name: 'HARBOR_PROJECT',
            defaultValue: 'juwan',
            description: 'Harbor 项目名'
        )
        string(
            name: 'K8S_NAMESPACE',
            defaultValue: 'juwan',
            description: 'Kubernetes 命名空间'
        )
        // Only the `latest` tag is watched when polling
        string(
            name: 'IMAGE_TAG',
            defaultValue: 'latest',
            description: '要监听的镜像 Tag'
        )
        string(
            name: 'TARGET_SERVICES',
            defaultValue: '',
            description: '指定要部署的服务(逗号分隔,如 user-api,user-rpc)。留空则自动从 Harbor 查询实际存在的仓库'
        )
        booleanParam(
            name: 'FORCE_DEPLOY',
            defaultValue: false,
            description: '强制部署所有服务(忽略 digest 对比,用于手动触发全量更新)'
        )
    }

    // ── Trigger: poll every 2 minutes ──────────────────────────
    triggers {
        // cron expression: H/2 * * * * → every 2 minutes.
        // In production, switch to a webhook and delete this block.
        cron('H/2 * * * *')
    }

    environment {
        // Harbor API v2 base path
        HARBOR_API = "http://${params.HARBOR_REGISTRY}/api/v2.0"
        // Directory for digest state files (persisted inside the
        // Jenkins workspace across builds)
        DIGEST_STATE_DIR = "${WORKSPACE}/.digest-state"
        // kubectl command (actual path on the node)
        KUBECTL = 'kubectl'
    }

    stages {
        // ── Stage 1: initialization ─────────────────────────────
        stage('Init') {
            steps {
                script {
                    echo "=== juwan-backend CD Pipeline (Poll Mode) ==="
                    echo "Harbor: ${params.HARBOR_REGISTRY}/${params.HARBOR_PROJECT}"
                    echo "Namespace: ${params.K8S_NAMESPACE}"
                    echo "Tag: ${params.IMAGE_TAG}"

                    // Create the digest state directory (persists across builds)
                    sh "mkdir -p ${DIGEST_STATE_DIR}"

                    // Build the list of services to check
                    if (params.TARGET_SERVICES?.trim()) {
                        env.SERVICE_LIST = params.TARGET_SERVICES.trim()
                        echo "使用指定服务列表: ${env.SERVICE_LIST}"
                    } else {
                        // Query the Harbor API directly so that only repositories
                        // that actually exist are monitored — scanning k8s yaml
                        // would make us check many non-existent images.
                        withCredentials([
                            usernamePassword(
                                credentialsId: 'harbor-credentials',
                                usernameVariable: 'HARBOR_USER',
                                passwordVariable: 'HARBOR_PASS'
                            )
                        ]) {
                            def harborListUrl = "${HARBOR_API}/projects/${params.HARBOR_PROJECT}/repositories?page_size=100"
                            // NOTE(review): repo names are flattened with split("/")[-1];
                            // nested Harbor repos (a/b) would later need %2F URL-encoding
                            // in the artifact URL — confirm the project only uses flat names.
                            def services = sh(
                                script: """
                                    HARBOR_LIST_URL="${harborListUrl}"
                                    curl -s -u "\${HARBOR_USER}:\${HARBOR_PASS}" \\
                                        --connect-timeout 10 --max-time 30 \\
                                        "\${HARBOR_LIST_URL}" \\
                                    | python3 -c '
import sys, json
repos = json.load(sys.stdin)
if not isinstance(repos, list):
    sys.exit(1)
names = [r["name"].split("/")[-1] for r in repos]
print(",".join(sorted(names)))
' 2>/dev/null || echo ""
                                """,
                                returnStdout: true
                            ).trim()

                            if (!services) {
                                error("无法从 Harbor 获取仓库列表,请检查 harbor-credentials 凭据和 Harbor 地址")
                            }
                            env.SERVICE_LIST = services
                        }
                        echo "从 Harbor 自动发现服务列表: ${env.SERVICE_LIST}"
                    }
                }
            }
        }

        // ── Stage 2: detect image changes ───────────────────────
        stage('Detect Changes') {
            steps {
                script {
                    withCredentials([
                        usernamePassword(
                            credentialsId: 'harbor-credentials',
                            usernameVariable: 'HARBOR_USER',
                            passwordVariable: 'HARBOR_PASS'
                        )
                    ]) {
                        def serviceList = env.SERVICE_LIST.split(',').collect { it.trim() }.findAll { it }
                        def changedServices = []

                        serviceList.each { svc ->
                            if (!svc) return
                            echo "检查镜像: ${svc}:${params.IMAGE_TAG}"

                            // Fetch the current digest of <svc>:<tag> from Harbor;
                            // empty string on any failure (network, auth, JSON).
                            def digestResult = sh(
                                script: """
                                    curl -s -u "\${HARBOR_USER}:\${HARBOR_PASS}" \\
                                        --connect-timeout 10 \\
                                        --max-time 30 \\
                                        "${HARBOR_API}/projects/${params.HARBOR_PROJECT}/repositories/${svc}/artifacts/${params.IMAGE_TAG}" \\
                                    | python3 -c "import sys,json; d=json.load(sys.stdin); print(d.get('digest',''))" 2>/dev/null || echo ""
                                """,
                                returnStdout: true
                            ).trim()

                            if (!digestResult) {
                                echo " ⚠️ 无法获取 ${svc} 的 digest,跳过"
                                return
                            }

                            def stateFile = "${DIGEST_STATE_DIR}/${svc}.digest"
                            def lastDigest = fileExists(stateFile) ? readFile(stateFile).trim() : ""
                            echo " 当前 digest: ${digestResult}"
                            echo " 上次 digest: ${lastDigest ?: '(首次检测)'}"

                            if (params.FORCE_DEPLOY || digestResult != lastDigest) {
                                if (params.FORCE_DEPLOY) {
                                    echo " 🔄 强制部署模式,加入部署队列"
                                } else {
                                    echo " ✅ 检测到变化,加入部署队列"
                                }
                                changedServices << svc
                                // Only record a *candidate* here. The state file is
                                // updated in Deploy after a successful rollout, so a
                                // failed deployment is retried on the next poll.
                                writeFile file: "${stateFile}.candidate", text: digestResult
                            } else {
                                echo " — 无变化,跳过"
                            }
                        }

                        env.CHANGED_SERVICES = changedServices.join(',')
                        echo "需要更新的服务: ${env.CHANGED_SERVICES ?: '(无变化)'}"
                    }
                }
            }
        }

        // ── Stage 3: deploy to k3s ──────────────────────────────
        stage('Deploy') {
            when {
                expression { env.CHANGED_SERVICES?.trim() }
            }
            steps {
                script {
                    withCredentials([
                        file(credentialsId: 'kubeconfig-dev', variable: 'KUBECONFIG_FILE')
                    ]) {
                        def changedList = env.CHANGED_SERVICES.split(',').collect { it.trim() }.findAll { it }

                        changedList.each { svc ->
                            echo "=== 部署 ${svc} ==="
                            def stateFile = "${DIGEST_STATE_DIR}/${svc}.digest"
                            def candidateFile = "${stateFile}.candidate"

                            // Verify that the Deployment exists
                            def exists = sh(
                                script: """
                                    KUBECONFIG=\${KUBECONFIG_FILE} ${KUBECTL} get deployment ${svc} \\
                                        -n ${params.K8S_NAMESPACE} \\
                                        --ignore-not-found \\
                                        -o name 2>/dev/null || echo ""
                                """,
                                returnStdout: true
                            ).trim()

                            if (!exists) {
                                echo " ⚠️ Deployment ${svc} 不存在于 ${params.K8S_NAMESPACE},跳过"
                                // Promote the digest anyway so a service with no
                                // Deployment is skipped once, not re-flagged each poll
                                // (matches previous behavior).
                                if (fileExists(candidateFile)) {
                                    writeFile file: stateFile, text: readFile(candidateFile).trim()
                                }
                                return
                            }

                            // Trigger a rolling restart (imagePullPolicy: Always
                            // pulls the newest `latest` image)
                            sh """
                                KUBECONFIG=\${KUBECONFIG_FILE} ${KUBECTL} rollout restart deployment/${svc} \\
                                    -n ${params.K8S_NAMESPACE}
                            """

                            // Wait for the rollout to finish (3-minute timeout)
                            def rolloutStatus = sh(
                                script: """
                                    KUBECONFIG=\${KUBECONFIG_FILE} ${KUBECTL} rollout status deployment/${svc} \\
                                        -n ${params.K8S_NAMESPACE} \\
                                        --timeout=180s
                                """,
                                returnStatus: true
                            )

                            if (rolloutStatus == 0) {
                                echo " ✅ ${svc} 部署成功"
                                // Commit the digest only now: deployment succeeded.
                                if (fileExists(candidateFile)) {
                                    writeFile file: stateFile, text: readFile(candidateFile).trim()
                                }
                            } else {
                                // Deployment failed: roll back and fail the build.
                                // The digest candidate is deliberately NOT promoted,
                                // so the next poll retries this service.
                                echo " ❌ ${svc} 部署超时或失败,执行回滚..."
                                sh """
                                    KUBECONFIG=\${KUBECONFIG_FILE} ${KUBECTL} rollout undo deployment/${svc} \\
                                        -n ${params.K8S_NAMESPACE}
                                """
                                error("${svc} 部署失败,已回滚")
                            }
                        }
                    }
                }
            }
        }

        // ── Stage 4: deployment summary ─────────────────────────
        stage('Summary') {
            steps {
                script {
                    if (env.CHANGED_SERVICES?.trim()) {
                        echo "=== 本次部署完成 ==="
                        echo "已更新服务: ${env.CHANGED_SERVICES}"
                    } else {
                        echo "=== 无服务需要更新 ==="
                    }
                }
            }
        }
    }

    post {
        failure {
            echo "❌ Pipeline 失败,请检查上方日志"
        }
        success {
            echo "✅ Pipeline 执行完成"
        }
    }
}