Notes from installing and testing Kubernetes 1.23 with KEDA 2.7.1.
Run the install with a China-local image registry and a pinned version:
kubeadm init --apiserver-advertise-address=192.168.XXX.XXX --image-repository \
registry.aliyuncs.com/google_containers --kubernetes-version v1.23.5 \
--pod-network-cidr=10.244.0.0/16
kubeadm join 192.168.XXX.XXX:6443 --token t2o9eq.xxxx \
--discovery-token-ca-cert-hash sha256:xxxx
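If the join token printed by kubeadm init has already expired when a worker node is added, a new join command can be generated on the control-plane node with the standard kubeadm subcommand:
kubeadm token create --print-join-command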
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
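When kubectl will be used by a non-root user, the usual kubeadm post-install steps also create the target directory and fix ownership first:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config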
Check the result:
root@ubuntu:~/k8s# kubectl get nodes
NAME STATUS ROLES AGE VERSION
ubuntu Ready control-plane,master 2m v1.23.7
ubuntu152 Ready <none> 59s v1.23.7
ubuntu153 Ready <none> 50s v1.23.7
root@ubuntu:~/k8s# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-6d8c4cb4d-fp5gn 1/1 Running 0 107s
kube-system coredns-6d8c4cb4d-rb5c5 1/1 Running 0 107s
kube-system etcd-ubuntu 1/1 Running 4 2m13s
kube-system kube-apiserver-ubuntu 1/1 Running 5 2m6s
kube-system kube-controller-manager-ubuntu 1/1 Running 0 2m10s
kube-system kube-proxy-jszpl 1/1 Running 0 73s
kube-system kube-proxy-m7942 1/1 Running 0 107s
kube-system kube-proxy-qg9mt 1/1 Running 0 64s
kube-system kube-scheduler-ubuntu 1/1 Running 12 2m7s
kubectl apply -f flannel/kube-flannel.yml
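To confirm flannel has rolled out on every node before moving on (the pod names later in this log show the DaemonSet is named kube-flannel-ds in kube-system):
kubectl -n kube-system get daemonset kube-flannel-ds
kubectl -n kube-system get pods -o wide | grep flannel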
Install metrics-server, switching the image to a China-local registry and disabling TLS verification against the kubelets (skipping TLS is not recommended in production):
spec:
  containers:
  - args:
    - --cert-dir=/tmp
    - --secure-port=4443
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --kubelet-use-node-status-port
    - --metric-resolution=15s
    - --kubelet-insecure-tls
    image: registry.aliyuncs.com/google_containers/metrics-server:v0.6.1
    imagePullPolicy: IfNotPresent
kubectl apply -f metrics-server/metrics-server.yaml
Verify that metrics-server is working:
kubectl top pod --all-namespaces
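If kubectl top reports that metrics are not yet available, checking the APIService that metrics-server registers and the pod logs usually narrows it down (deployment name taken from the pod listing later in this log):
kubectl get apiservice v1beta1.metrics.k8s.io
kubectl -n kube-system logs deploy/metrics-server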
Build the app and dummy images first and push them to a registry (or load them onto each node). Note that the app's Dockerfile was missing the go build step, which is added below:
FROM golang:1.15
WORKDIR /go/src/app
COPY helper/ .
# Use a China-local Go module proxy so dependency downloads succeed
RUN go env -w GOPROXY=https://goproxy.cn
RUN go get -v ./...
# go install places the keda-talk binary in /go/bin, which the entrypoint expects
RUN go install -v .
RUN go build
ENTRYPOINT ["/go/bin/keda-talk"]
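A rough sketch of building and distributing the image; the tag and registry address below are placeholders, not values from the keda-example repo:
docker build -t <your-registry>/keda-talk:latest .
docker push <your-registry>/keda-talk:latest
# without a registry: export the image and load it on each node instead
docker save <your-registry>/keda-talk:latest -o keda-talk.tar
docker load -i keda-talk.tar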
kubectl apply -f keda-2.7.1.yaml
kubectl apply -f deployment/dummy-deployment.yaml
kubectl apply -f deployment/app-deployment.yaml
kubectl apply -f deployment/redis-deployment.yaml
kubectl apply -f keda/redis-hpa.yaml
kubectl exec -it $(kubectl get pods | grep "server" | cut -f 1 -d " ") keda-talk redis publish
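The publish command is expected to push entries onto the Redis list that the ScaledObject watches (listName: default in the redis-hpa spec shown further down). Assuming the redis pod runs a standard redis image with redis-cli on board, the list length can be checked directly:
kubectl exec -it $(kubectl get pods | grep "redis" | cut -f 1 -d " ") -- redis-cli llen default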
By default, keda-operator will log errors like the following:
1.6545075479686694e+09 ERROR controller.scaledobject Reconciler error {"reconciler group": "keda.sh", "reconciler kind": "ScaledObject", "name": "redis-scaledobject", "namespace": "default", "error": "connection to redis failed: dial tcp: address REDIS_ADDRESS: missing port in address"}
or
1.6545067911331463e+09 ERROR controller.scaledobject Failed to create new HPA resource {"reconciler group": "keda.sh", "reconciler kind": "ScaledObject", "name": "redis-scaledobject", "namespace": "default", "HPA.Namespace": "default", "HPA.Name": "keda-hpa-redis-scaledobject", "error": "connection to redis failed: dial tcp: lookup redis on 10.96.0.10:53: no such host"}
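Before changing anything, in-cluster DNS resolution can be checked from a throwaway pod; busybox:1.28 is commonly used for this and the pod name here is arbitrary:
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup redis.default.svc.cluster.local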
The cause is that redis-hpa and the keda operator live in different namespaces: the connection is made from the keda namespace, so the short service name redis does not resolve there. Adjust the address in the redis-hpa YAML to the fully qualified service name (replacing the commented-out addressFromEnv) as follows:
spec:
  maxReplicaCount: 4
  pollingInterval: 3   # Optional. Default: 30 seconds
  cooldownPeriod: 5    # Optional. Default: 300 seconds
  scaleTargetRef:
    name: dummy
  triggers:
  - type: redis
    metadata:
      # addressFromEnv: REDIS_ADDRESS
      listName: default
      listLength: "10"
      enableTLS: "false"
      address: redis.default.svc.cluster.local:6379
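For reference, this spec sits under a standard KEDA v2 ScaledObject header; assuming the object name from the operator logs above, the full file begins like this:
apiVersion: keda.sh/v1alpha1
kind: ScaledObject
metadata:
  name: redis-scaledobject
  namespace: default
spec:
  # the fields shown above go here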
Redeploy and check whether the HPA gets created:
root@ubuntu:~/k8s/keda/keda-example-master# kubectl delete -f keda/redis-hpa.yaml
scaledobject.keda.sh "redis-scaledobject" deleted
root@ubuntu:~/k8s/keda/keda-example-master# kubectl apply -f keda/redis-hpa.yaml
scaledobject.keda.sh/redis-scaledobject created
root@ubuntu:~/k8s/keda/keda-example-master# kubectl get hpa -o wide --all-namespaces
NAMESPACE NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
default keda-hpa-redis-scaledobject Deployment/dummy <unknown>/10 (avg) 1 4 0 3s
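The <unknown> target should resolve once KEDA starts serving the external metric; describing the HPA and the ScaledObject (names from the output above) shows whether the Redis list length is being reported:
kubectl describe hpa keda-hpa-redis-scaledobject
kubectl describe scaledobject redis-scaledobject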
Run the test command:
kubectl exec -it $(kubectl get pods | grep "server" | cut -f 1 -d " ") keda-talk redis publish
Check the result:
root@ubuntu:~/k8s/keda/keda-example-master# kubectl get pods -o wide --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
default dummy-79d8bb5844-hsm2t 1/1 Running 0 20s 10.244.1.6 ubuntu152 <none> <none>
default dummy-79d8bb5844-lcv6n 1/1 Running 0 20s 10.244.2.7 ubuntu153 <none> <none>
default dummy-79d8bb5844-llw4b 1/1 Running 0 19s 10.244.2.8 ubuntu153 <none> <none>
default dummy-79d8bb5844-r8jqs 1/1 Running 1 (113s ago) 168m 10.244.1.4 ubuntu152 <none> <none>
default redis-db4894674-676q2 1/1 Running 0 165m 10.244.1.5 ubuntu152 <none> <none>
default server-5c74b4668c-69225 1/1 Running 0 167m 10.244.2.6 ubuntu153 <none> <none>
keda keda-metrics-apiserver-6c7d49b596-8lbj4 1/1 Running 0 175m 10.244.2.4 ubuntu153 <none> <none>
keda keda-operator-d9f5579-qjp57 1/1 Running 0 175m 10.244.2.5 ubuntu153 <none> <none>
kube-system coredns-6d8c4cb4d-fp5gn 1/1 Running 0 6h45m 10.244.0.2 ubuntu <none> <none>
kube-system coredns-6d8c4cb4d-rb5c5 1/1 Running 0 6h45m 10.244.0.3 ubuntu <none> <none>
kube-system etcd-ubuntu 1/1 Running 4 6h46m 192.168.XXX.XXX ubuntu <none> <none>
kube-system kube-apiserver-ubuntu 1/1 Running 5 6h45m 192.168.XXX.XXX ubuntu <none> <none>
kube-system kube-controller-manager-ubuntu 1/1 Running 0 6h45m 192.168.XXX.XXX ubuntu <none> <none>
kube-system kube-flannel-ds-gmwbn 1/1 Running 10 (6h17m ago) 6h41m 192.168.XXX.XXX ubuntu153 <none> <none>
kube-system kube-flannel-ds-ptdxk 1/1 Running 0 6h41m 192.168.XXX.XXX ubuntu <none> <none>
kube-system kube-flannel-ds-t6vjw 1/1 Running 10 (6h18m ago) 6h41m 192.168.XXX.XXX ubuntu152 <none> <none>
kube-system kube-proxy-jszpl 1/1 Running 1 (6h16m ago) 6h45m 192.168.XXX.XXX ubuntu152 <none> <none>
kube-system kube-proxy-m7942 1/1 Running 0 6h45m 192.168.XXX.XXX ubuntu <none> <none>
kube-system kube-proxy-qg9mt 1/1 Running 1 (6h17m ago) 6h44m 192.168.XXX.XXX ubuntu153 <none> <none>
kube-system kube-scheduler-ubuntu 1/1 Running 12 6h45m 192.168.XXX.XXX ubuntu <none> <none>
kube-system metrics-server-7fd564dc66-446dd 1/1 Running 0 3h9m 10.244.1.3 ubuntu152 <none> <none>
root@ubuntu:~/k8s/keda/keda-example-master#
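With pollingInterval: 3 and cooldownPeriod: 5 configured above, the dummy deployment should scale back down shortly after the Redis list is drained; this can be watched with:
kubectl get hpa keda-hpa-redis-scaledobject -w
kubectl get deployment dummy -w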