当前位置: 首页 > 知识库问答 >
问题:

Kubernetes滚动更新不工作

竺绍辉
2023-03-14

我为不同的项目安装了两个Kubernetes集群,据我所见,它们在关键区域的配置是等效的,但这两个集群执行滚动更新的方式却不同。

两者都是使用KOPS安装在AWS上的。

{
  "kind": "Deployment",
  "apiVersion": "extensions/v1beta1",
  "metadata": {
    "name": "proxy-deployment",
    "namespace": "namespace",
    "selfLink": "/apis/extensions/v1beta1/namespaces/namespace/deployments/proxy-deployment",
    "uid": "d12778ba-8950-11e7-9e69-12f38e55b21a",
    "resourceVersion": "31538492",
    "generation": 7,
    "creationTimestamp": "2017-08-25T04:49:45Z",
    "labels": {
      "app": "proxy"
    },
    "annotations": {
      "deployment.kubernetes.io/revision": "6",
      "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"apps/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"name\":\"proxy-deployment\",\"namespace\":\"namespace\"},\"spec\":{\"replicas\":2,\"template\":{\"metadata\":{\"labels\":{\"app\":\"proxy\"}},\"spec\":{\"containers\":[{\"image\":\"xxxxxxxxxxx.dkr.ecr.us-east-1.amazonaws.com/nginx-proxy-xxxxxx:latest\",\"name\":\"proxy-ctr\",\"ports\":[{\"containerPort\":80},{\"containerPort\":8080}]}]}}}}\n"
    }
  },
  "spec": {
    "replicas": 1,
    "selector": {
      "matchLabels": {
        "app": "proxy"
      }
    },
    "template": {
      "metadata": {
        "creationTimestamp": null,
        "labels": {
          "app": "proxy",
          "date": "1522386390"
        }
      },
      "spec": {
        "containers": [
          {
            "name": "proxy-ctr",
            "image": "xxxxxxxxxxx.dkr.ecr.us-east-1.amazonaws.com/nginx-proxy-xxxxxx:latest",
            "ports": [
              {
                "containerPort": 80,
                "protocol": "TCP"
              },
              {
                "containerPort": 8080,
                "protocol": "TCP"
              }
            ],
            "resources": {},
            "terminationMessagePath": "/dev/termination-log",
            "terminationMessagePolicy": "File",
            "imagePullPolicy": "Always"
          }
        ],
        "restartPolicy": "Always",
        "terminationGracePeriodSeconds": 30,
        "dnsPolicy": "ClusterFirst",
        "securityContext": {},
        "schedulerName": "default-scheduler"
      }
    },
    "strategy": {
      "type": "RollingUpdate",
      "rollingUpdate": {
        "maxUnavailable": "25%",
        "maxSurge": "25%"
      }
    },
    "revisionHistoryLimit": 2,
    "progressDeadlineSeconds": 600
  },
  "status": {
    "observedGeneration": 7,
    "replicas": 1,
    "updatedReplicas": 1,
    "readyReplicas": 1,
    "availableReplicas": 1,
    "conditions": [
      {
        "type": "Progressing",
        "status": "True",
        "lastUpdateTime": "2018-03-30T05:03:01Z",
        "lastTransitionTime": "2017-08-25T04:49:45Z",
        "reason": "NewReplicaSetAvailable",
        "message": "ReplicaSet \"proxy-deployment-1457650622\" has successfully progressed."
      },
      {
        "type": "Available",
        "status": "True",
        "lastUpdateTime": "2018-06-01T06:55:12Z",
        "lastTransitionTime": "2018-06-01T06:55:12Z",
        "reason": "MinimumReplicasAvailable",
        "message": "Deployment has minimum availability."
      }
    ]
  }
}
{
  "kind": "Deployment",
  "apiVersion": "extensions/v1beta1",
  "metadata": {
    "name": "prodefault-deployment",
    "namespace": "namespace",
    "selfLink": "/apis/extensions/v1beta1/namespaces/namespace/deployments/prodefault-deployment",
    "uid": "a80528c8-eb79-11e7-9364-068125440f70",
    "resourceVersion": "25203392",
    "generation": 10,
    "creationTimestamp": "2017-12-28T02:49:00Z",
    "labels": {
      "app": "prodefault"
    },
    "annotations": {
      "deployment.kubernetes.io/revision": "7",
      "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"apps/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"name\":\"prodefault-deployment\",\"namespace\":\"namespace\"},\"spec\":{\"replicas\":1,\"strategy\":{\"rollingUpdate\":{\"maxSurge\":\"25%\",\"maxUnavailable\":\"25%\"},\"type\":\"RollingUpdate\"},\"template\":{\"metadata\":{\"labels\":{\"app\":\"prodefault\"}},\"spec\":{\"containers\":[{\"image\":\"xxxxxxxxxxxx.dkr.ecr.us-west-2.amazonaws.com/xxxxxxxxxxx-pro-default:latest\",\"livenessProbe\":{\"httpGet\":{\"path\":\"/healthchk\",\"port\":80},\"initialDelaySeconds\":120,\"periodSeconds\":15,\"timeoutSeconds\":1},\"name\":\"prodefault-ctr\",\"ports\":[{\"containerPort\":80}],\"readinessProbe\":{\"httpGet\":{\"path\":\"/healthchk\",\"port\":80},\"initialDelaySeconds\":5,\"periodSeconds\":2,\"timeoutSeconds\":3},\"resources\":{\"limits\":{\"cpu\":\"1\",\"memory\":\"1024Mi\"},\"requests\":{\"cpu\":\"150m\",\"memory\":\"256Mi\"}},\"volumeMounts\":[{\"mountPath\":\"/var/www/html/homes\",\"name\":\"efs-pvc\"},{\"mountPath\":\"/var/xero\",\"name\":\"xero-key\",\"readOnly\":true},{\"mountPath\":\"/var/gcal\",\"name\":\"gcal-json\",\"readOnly\":true}]}],\"volumes\":[{\"name\":\"efs-pvc\",\"persistentVolumeClaim\":{\"claimName\":\"tio-pv-claim-homes\"}},{\"name\":\"xero-key\",\"secret\":{\"secretName\":\"xero-key\"}},{\"name\":\"gcal-json\",\"secret\":{\"secretName\":\"gcaljson\"}}]}}}}\n"
    }
  },
  "spec": {
    "replicas": 1,
    "selector": {
      "matchLabels": {
        "app": "prodefault"
      }
    },
    "template": {
      "metadata": {
        "creationTimestamp": null,
        "labels": {
          "app": "prodefault"
        }
      },
      "spec": {
        "volumes": [
          {
            "name": "efs-pvc",
            "persistentVolumeClaim": {
              "claimName": "tio-pv-claim-homes"
            }
          },
          {
            "name": "xero-key",
            "secret": {
              "secretName": "xero-key",
              "defaultMode": 420
            }
          },
          {
            "name": "gcal-json",
            "secret": {
              "secretName": "gcaljson",
              "defaultMode": 420
            }
          }
        ],
        "containers": [
          {
            "name": "prodefault-ctr",
            "image": "xxxxxxxxxxxx.dkr.ecr.us-west-2.amazonaws.com/xxxxxxxxxxx-pro-default:latest",
            "ports": [
              {
                "containerPort": 80,
                "protocol": "TCP"
              }
            ],
            "resources": {
              "limits": {
                "cpu": "1",
                "memory": "1Gi"
              },
              "requests": {
                "cpu": "150m",
                "memory": "256Mi"
              }
            },
            "volumeMounts": [
              {
                "name": "efs-pvc",
                "mountPath": "/var/www/html/homes"
              },
              {
                "name": "xero-key",
                "readOnly": true,
                "mountPath": "/var/xero"
              },
              {
                "name": "gcal-json",
                "readOnly": true,
                "mountPath": "/var/gcal"
              }
            ],
            "livenessProbe": {
              "httpGet": {
                "path": "/healthchk",
                "port": 80,
                "scheme": "HTTP"
              },
              "initialDelaySeconds": 120,
              "timeoutSeconds": 1,
              "periodSeconds": 15,
              "successThreshold": 1,
              "failureThreshold": 3
            },
            "readinessProbe": {
              "httpGet": {
                "path": "/healthchk",
                "port": 80,
                "scheme": "HTTP"
              },
              "initialDelaySeconds": 5,
              "timeoutSeconds": 3,
              "periodSeconds": 2,
              "successThreshold": 1,
              "failureThreshold": 3
            },
            "terminationMessagePath": "/dev/termination-log",
            "terminationMessagePolicy": "File",
            "imagePullPolicy": "Always"
          }
        ],
        "restartPolicy": "Always",
        "terminationGracePeriodSeconds": 30,
        "dnsPolicy": "ClusterFirst",
        "securityContext": {},
        "schedulerName": "default-scheduler"
      }
    },
    "strategy": {
      "type": "RollingUpdate",
      "rollingUpdate": {
        "maxUnavailable": "25%",
        "maxSurge": "25%"
      }
    },
    "revisionHistoryLimit": 2,
    "progressDeadlineSeconds": 600
  },
  "status": {
    "observedGeneration": 10,
    "replicas": 1,
    "updatedReplicas": 1,
    "readyReplicas": 1,
    "availableReplicas": 1,
    "conditions": [
      {
        "type": "Progressing",
        "status": "True",
        "lastUpdateTime": "2018-01-15T06:07:52Z",
        "lastTransitionTime": "2017-12-28T03:00:16Z",
        "reason": "NewReplicaSetAvailable",
        "message": "ReplicaSet \"prodefault-deployment-9685f46d4\" has successfully progressed."
      },
      {
        "type": "Available",
        "status": "True",
        "lastUpdateTime": "2018-06-13T07:12:41Z",
        "lastTransitionTime": "2018-06-13T07:12:41Z",
        "reason": "MinimumReplicasAvailable",
        "message": "Deployment has minimum availability."
      }
    ]
  }
}

共有1个答案

呼延聪
2023-03-14

我注意到两个POD都定义了以下滚动更新策略:

"strategy": {
  "type": "RollingUpdate",
  "rollingUpdate": {
    "maxUnavailable": "25%",
    "maxSurge": "25%"
  }
},

这样,在通过'kubectl set image'或'kubectl apply'正常触发滚动更新、创建新pod之后,它应该会终止旧pod。

所以两个系统之间的不同行为可能来自于仪表板(dashboard)。我猜您在两个系统中运行的dashboard版本不同,因为根据dashboard的兼容性矩阵,Kubernetes v1.7需要dashboard 1.7来支持,而Kubernetes v1.8需要dashboard 1.8来支持。也许不同版本的仪表板会把"删除Pod"当作不同的操作来处理,这一点我无法确定。

最后,不要依赖手动"删除Pod"来触发滚动更新。

 类似资料:
  • 使用Helm创建Tomcat吊舱 helm创建hello-world 更改了deployment.yaml中的映像名和部署名

  • 为了在生产中部署express Node.js api,我希望使用kubernetes,以下步骤如下: 在google cloud中创建集群 从git将代码克隆到集群 Docker build-t gcr.io/[GCLOUDID]/app:v1。 Docker pushgcr.io/[GCLOUDID]/app:v1 Kubectl运行app--image=gcr.io/[GCLOUDID]/a

  • 由于设计初衷是作为多用户,Ansible很擅长在某一个主机上代表另一个主机做事,或者参考远程主机做一些本地工作. 这个特性对于搭建持续部署的某些设施或者零停机滚动升级很有用,这里你可能会用到负载均衡或者监控系统. 更多的特性允许调整任务完成的顺序,和设置一批窗口,来确定多少机器在一次中滚动更新. 这节会介绍所有这些特性.`详情案例参见 <http://github.com/ansible/ansible-e

  • 问题内容: 我有一个测试JDBC程序,该程序试图更改ResultSet的可滚动性和可更新性功能。不幸的是,所有组合,并似乎产生相同的结果(和)。 即使使用默认值(),也可以在ResultSet中滚动。谁能解释为什么? 我正在使用MySQL 5.6和JDK7。这是代码: 问题答案: 正如马克Rotteveel到问题的评论中提到,MySQL的高速缓存默认的ResultSet数据(也由Ben J. Ch

  • 删除缓存的存档以强制重新下载: 执行明确的回购更新 比较github上8天前修改的最新版本:https://github.com/confluentinc/cp-helm-charts/blob/master/charts/cp-kafka/templates/statefulset.yaml#L140 github版本具有。Helm提取的版本只有而没有后面的。 为什么Helm拉出的文件在GitH

  • 我有一个嵌入式Hazelcast实例,我将其配置为使用k8s API(服务发现),并使用Route Policy拥有一个主动/被动集群FTP阅读器。 我在集群中只有3个实例。我的滚动更新是在替换旧实例之前首先添加一个新实例。 我得到这个错误,当我做滚动更新。 看起来Hazelcast实例在骆驼有机会为这个集群选出新的领导者之前就停止了。 问题: 在代码中创建Hazelcast实例时,是否需要将实例