目录
PodGCController 结构体 及 controller 注册过程
当时遇到的问题是这样的:运维同学 kubectl delete node xxx 之后,隔一段时间该宿主机上的 pod 会被 gc 掉,而通过 kubectl get pod 获取时,返回的是 not found。理论上,kubernetes controller 会自动拉起该容器,但是由于我们 pod 和 node 有绑定关系,当绑定关系没有删除的时候,sts controller 不会去同步拉起 pod 容器。删除绑定关系后,冷却队列还会有 1000s 的同步等待时间,之后才会将该 pod 调度绑定成功(这是另一个问题)。
带着问题看代码,主要弄明白以下两个问题:
--controllers stringSlice Default: [*]
A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller named 'foo', '-foo' disables the controller named 'foo'.
All controllers: attachdetach, bootstrapsigner, cloud-node-lifecycle, clusterrole-aggregation, cronjob, csrapproving, csrcleaner, csrsigning, daemonset, deployment, disruption, endpoint, garbagecollector, horizontalpodautoscaling, job, namespace, nodeipam, nodelifecycle, persistentvolume-binder, persistentvolume-expander, podgc, pv-protection, pvc-protection, replicaset, replicationcontroller, resourcequota, root-ca-cert-publisher, route, service, serviceaccount, serviceaccount-token, statefulset, tokencleaner, ttl, ttl-after-finished
Disabled-by-default controllers: bootstrapsigner, tokencleaner
--terminated-pod-gc-threshold int32 Default: 12500
Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.
设置可保存的终止pod数量,超过该数量时,垃圾回收器将开始进行删除操作。
设置为不大于 0 的值表示不启用该功能,默认值是 12500。
代码路径:pkg/controller/podgc/gc_controller.go
type PodGCController struct {
kubeClient clientset.Interface // 与api-server通信的客户端
podLister corelisters.PodLister // 监听podList 变化
podListerSynced cache.InformerSynced // 判断podList是否同步完成
deletePod func(namespace, name string) error //调用api-server删除pod
terminatedPodThreshold int // 设置保存终止pod数量
}
初始化注册podgc:controllers["podgc"] = startPodGCController
调用 NewPodGC进行初始化,并调用Run方法。
controllers["podgc"] = startPodGCController
func startPodGCController(ctx ControllerContext) (http.Handler, bool, error) {
go podgc.NewPodGC(
ctx.ClientBuilder.ClientOrDie("pod-garbage-collector"),
ctx.InformerFactory.Core().V1().Pods(),
int(ctx.ComponentConfig.PodGCController.TerminatedPodGCThreshold),
).Run(ctx.Stop)
return nil, true, nil
}
kubeClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)) 表示优雅退出时间为0,立即删除pod容器。即 kubectl delete pod xxx --force --grace-period=0 强制删除,具体可以看kubernetes接口。
func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInformer, terminatedPodThreshold int) *PodGCController {
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
}
gcc := &PodGCController{
kubeClient: kubeClient,
terminatedPodThreshold: terminatedPodThreshold,
deletePod: func(namespace, name string) error {
glog.Infof("PodGC is force deleting Pod: %v/%v", namespace, name)
return kubeClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0))
},
}
gcc.podLister = podInformer.Lister()
gcc.podListerSynced = podInformer.Informer().HasSynced
return gcc
}
启动 go 协程,每间隔 20s 执行一次 gcc.gc 进行 Pod 回收。
func (gcc *PodGCController) Run(stop <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Infof("Starting GC controller")
defer glog.Infof("Shutting down GC controller")
if !controller.WaitForCacheSync("GC", stop, gcc.podListerSynced) {
return
}
// Until loops until stop channel is closed, running f every period.
// gcCheckPeriod = 20 * time.Second
go wait.Until(gcc.gc, gcCheckPeriod, stop)
<-stop
}
func (gcc *PodGCController) gc() {
pods, err := gcc.podLister.List(labels.Everything())
if err != nil {
glog.Errorf("Error while listing all Pods: %v", err)
return
}
if gcc.terminatedPodThreshold > 0 {
gcc.gcTerminated(pods)
}
gcc.gcOrphaned(pods)
gcc.gcUnscheduledTerminating(pods)
}
当终止状态的 pod 数量超过保留阈值时,按创建时间从最旧的开始,直接删除超出部分的终止 pod。
func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) {
terminatedPods := []*v1.Pod{}
for _, pod := range pods {
// 非 pending running unknown 状态
if isPodTerminated(pod) {
terminatedPods = append(terminatedPods, pod)
}
}
terminatedPodCount := len(terminatedPods)
// 按照创建时间进行排序
sort.Sort(byCreationTimestamp(terminatedPods))
deleteCount := terminatedPodCount - gcc.terminatedPodThreshold
if deleteCount > terminatedPodCount {
deleteCount = terminatedPodCount
}
if deleteCount > 0 {
glog.Infof("garbage collecting %v pods", deleteCount)
}
// 删除回收
var wait sync.WaitGroup
for i := 0; i < deleteCount; i++ {
wait.Add(1)
go func(namespace string, name string) {
defer wait.Done()
if err := gcc.deletePod(namespace, name); err != nil {
// ignore not founds
defer utilruntime.HandleError(err)
}
}(terminatedPods[i].Namespace, terminatedPods[i].Name)
}
wait.Wait()
}
代码注解是:gcOrphaned deletes pods that are bound to nodes that don't exist. 回收绑定到不存在node上的pod容器即孤儿容器。
孤儿pod容器 绑定的node 不存在集群中 进行回收。 至于回收之后,sts controller会不会自动拉起(理论上是状态不一致会拉起),这还得实验进行验证下?
func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod) {
glog.V(4).Infof("GC'ing orphaned")
// We want to get list of Nodes from the etcd, to make sure that it's as fresh as possible.
nodes, err := gcc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return
}
nodeNames := sets.NewString()
for i := range nodes.Items {
nodeNames.Insert(nodes.Items[i].Name)
}
for _, pod := range pods {
// 未调度pod容器
if pod.Spec.NodeName == "" {
continue
}
// pod绑定的node存在集群中
if nodeNames.Has(pod.Spec.NodeName) {
continue
}
// 孤儿pod容器 绑定的node 不存在集群中 进行回收
glog.V(2).Infof("Found orphaned Pod %v/%v assigned to the Node %v. Deleting.", pod.Namespace, pod.Name, pod.Spec.NodeName)
if err := gcc.deletePod(pod.Namespace, pod.Name); err != nil {
utilruntime.HandleError(err)
} else {
glog.V(0).Infof("Forced deletion of orphaned Pod %v/%v succeeded", pod.Namespace, pod.Name)
}
}
}
删除是 删除状态的,或者没有被调度到node的pod gcUnscheduledTerminating deletes pods that are terminating and haven't been scheduled to a particular node. 那其实这里我很好奇的是,sts 控制器缩容的时候 是将pod状态设置为 terminating 状态吗?调度不成功pending状态的pod会删除吗?
func (gcc *PodGCController) gcUnscheduledTerminating(pods []*v1.Pod) {
glog.V(4).Infof("GC'ing unscheduled pods which are terminating.")
for _, pod := range pods {
if pod.DeletionTimestamp == nil || len(pod.Spec.NodeName) > 0 {
continue
}
glog.V(2).Infof("Found unscheduled terminating Pod %v/%v not assigned to any Node. Deleting.", pod.Namespace, pod.Name)
if err := gcc.deletePod(pod.Namespace, pod.Name); err != nil {
utilruntime.HandleError(err)
} else {
glog.V(0).Infof("Forced deletion of unscheduled terminating Pod %v/%v succeeded", pod.Namespace, pod.Name)
}
}
}