Kubespray is a composition of Ansible playbooks, inventory, provisioning tools, and domain knowledge for generic OS and Kubernetes cluster configuration management tasks. Kubespray provides:
- a highly available cluster
- composable attributes
- support for most popular Linux distributions
  - Ubuntu 16.04, 18.04, 20.04
  - CentOS/RHEL/Oracle Linux 7, 8
  - Debian Buster, Jessie, Stretch, Wheezy
  - Fedora 31, 32
  - Fedora CoreOS
  - openSUSE Leap 15
  - Flatcar Container Linux by Kinvolk
IP | hostname |
---|---|
10.0.70.251 | n1 |
10.0.70.252 | n2 |
10.0.70.253 | n3 |
Component | Version |
---|---|
kubespray | 2.18.0 |
Kubernetes | 1.22.5 |
Etcd | 3.5.0 |
Docker | 20.10 |
Containerd | 1.5.8 |
CRI-O | 1.22 |
CNI-plugins | v1.0.1 |
Calico | v3.20.3 |
Cilium | 1.9.11 |
Flannel | 0.15.1 |
Kube-ovn | 1.8.1 |
Kube-Router | 1.3.2 |
Multus | 3.8 |
Weave | 2.8.1 |
CoreDNS | 1.8.0 |
Nodelocaldns | 1.21.1 |
Helm | 3.7.1 |
Nginx-ingress | 1.0.4 |
Cert-manager | 1.5.4 |
Kubernetes Dashboard | 2.4.0 |
vi /etc/hosts
10.0.70.251 n1
10.0.70.252 n2
10.0.70.253 n3
# Check firewall status
firewall-cmd --state
# Stop firewalld and disable it at boot
systemctl stop firewalld.service && systemctl disable firewalld.service
setenforce 0
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
swapoff -a && sysctl -w vm.swappiness=0 && sysctl -p && free -h
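To keep swap disabled across reboots, also comment out the swap entry in /etc/fstab. A minimal sketch, assuming a standard whitespace-separated fstab (back the file up and verify the result):
cp /etc/fstab /etc/fstab.bak
# comment out any non-commented line whose fields include "swap"
sed -ri 's/^([^#].*[[:space:]]swap[[:space:]].*)$/#\1/' /etc/fstab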
vi /etc/security/limits.conf
* soft nofile 32768
* hard nofile 65535
* soft nproc 32768
* hard nproc 65535
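The new limits only apply to new login sessions; after reconnecting, verify them with:
# effective limits for the current shell
ulimit -n   # open files (nofile)
ulimit -u   # max user processes (nproc)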
# Generate an SSH key
ssh-keygen
# Set up passwordless login to the other servers:
ssh-copy-id root@n1
ssh-copy-id root@n2
ssh-copy-id root@n3
# Test
ssh root@n2
# Exit
exit
# Global proxy settings
vi /etc/profile
export proxy="http://10.0.70.32:10801"
export http_proxy=$proxy
export https_proxy=$proxy
export ftp_proxy=$proxy
export no_proxy="localhost,127.0.0.1,::1"
# Apply the changes
source /etc/profile
# Test
wget google.com
# Disabling the proxy
# Commenting out the proxy lines in the profile alone is not enough; add the following lines to the file and source it again
unset proxy
unset http_proxy
unset https_proxy
unset ftp_proxy
unset no_proxy
cd /usr/local/
wget https://github.com/kubernetes-sigs/kubespray/archive/v2.18.0.tar.gz
tar -xzvf v2.18.0.tar.gz
cd kubespray-2.18.0
yum -y install epel-release python3 python3-pip yum-utils --downloadonly --downloaddir=/tmp/releases/offline-packages
yum -d 2 -y install conntrack container-selinux rsync socat unzip bash-completion ipvsadm --downloadonly --downloaddir=/tmp/releases/offline-packages
cd /tmp/releases/offline-packages
yum -y localinstall *.rpm
python3 -V && pip3 -V
cd /usr/local/kubespray-2.18.0
# 1. Install dependencies from ``requirements.txt``
# Download the Python packages for offline installation
sudo pip3 download -d /tmp/releases/python-installer -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com -r requirements.txt
# Install them offline
sudo pip3 install --no-index --find-links=/tmp/releases/python-installer -r requirements.txt
# Verify
pip3 list
# 2. Copy ``inventory/sample`` as ``inventory/mycluster``
cp -rfp inventory/sample inventory/mycluster
# The proxy (SOCKS) server must be configured to allow access from the LAN
vi inventory/mycluster/group_vars/all/all.yml
###
http_proxy: "http://10.0.70.32:10801"
https_proxy: "http://10.0.70.32:10801"
no_proxy: "localhost,127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.svc.cluster.local"
download_validate_certs: False
download_container: true
# 4. Review and change parameters under ``inventory/mycluster/group_vars``
# Customize the k8s cluster settings by editing the configuration files
cat inventory/mycluster/group_vars/all/all.yml
cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
# Enable the add-ons you need
vi inventory/mycluster/group_vars/k8s_cluster/addons.yml
dashboard_enabled: true
helm_enabled: true
ingress_nginx_enabled: true
ingress_nginx_host_network: true
# Cache downloaded images and files
vi roles/download/defaults/main.yml
download_cache_dir: /tmp/kubespray_cache
download_run_once: true
# kube_service_addresses,
# kube_pods_subnet,
# kube_proxy_mode: defaults to ipvs (ipvs or iptables)
vi inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
# kube_network_plugin: network plugin, defaults to calico (cilium, calico, weave or flannel)
kube_network_plugin: flannel
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 1
#Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 4
# 3. Update the Ansible inventory file with the inventory builder
declare -a IPS=(10.0.70.251 10.0.70.252 10.0.70.253)
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
# Setting the host names in hosts.yaml avoids having to rename the hosts themselves (see the sketch after the ping check below)
ansible -i inventory/mycluster/hosts.yaml all -m ping
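The generated hosts.yaml looks roughly like the sketch below (group names as used by Kubespray 2.18; the exact content depends on the inventory builder). Renaming node1/node2/node3 to n1/n2/n3 here keeps the inventory consistent with /etc/hosts:
all:
  hosts:
    n1:
      ansible_host: 10.0.70.251
      ip: 10.0.70.251
      access_ip: 10.0.70.251
    n2:
      ansible_host: 10.0.70.252
      ip: 10.0.70.252
      access_ip: 10.0.70.252
    n3:
      ansible_host: 10.0.70.253
      ip: 10.0.70.253
      access_ip: 10.0.70.253
  children:
    kube_control_plane:
      hosts:
        n1:
        n2:
    kube_node:
      hosts:
        n1:
        n2:
        n3:
    etcd:
      hosts:
        n1:
        n2:
        n3:
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node:
    calico_rr:
      hosts: {}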
# 5.Deploy Kubespray with Ansible Playbook - run the playbook as root
# The option `--become` is required, e.g. for writing SSL keys in /etc/,
# installing packages and interacting with various systemd daemons.
# Without --become the playbook will fail to run!
# The IPS list above produces a multi-node deployment; check hosts.yaml to see the result and adjust each host's roles as needed
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml -vvvvv --flush-cache
# If the deployment fails, retry (optionally limited to a single node)
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml --limit node1 -vvvvv
# If the deployment fails, reset the cluster
ansible-playbook -i inventory/mycluster/hosts.yaml reset.yml
[Issue 1] "Stop if memory is too small for masters"
Fix: edit roles/kubernetes/preinstall/defaults/main.yml and lower the memory thresholds
minimal_node_memory_mb: 1024
minimal_master_memory_mb: 1500
# Check cluster status
kubectl get nodes -o wide
kubectl get pods --all-namespaces
kubectl get pods -n kube-system
kubectl get services --all-namespaces
# Get the login token of the k8dash-sa service account:
kubectl describe secret k8dash-sa -n kube-system | awk '$1=="token:"{print $2}'
# Check certificate expiration
kubeadm certs check-expiration
yum history
yum history info 3
The yum history above shows the install commands that were executed, specifically:
-y install yum-utils
-d 2 -y install conntrack container-selinux rsync socat unzip bash-completion ipvsadm
cd contrib/offline  # see README.md
# manage-offline-container-images.sh
Container image collecting script for offline deployment
This script has two features:
(1) Collect container images from an environment that has been deployed online.
(2) Deploy a local container registry and push the collected images to it.
Step (1) should be done at an online site as preparation; the collected images are then brought to the target offline environment, where step (2) registers them in the local registry.
Step (1) is run with:
manage-offline-container-images.sh create
Step (2) is run with:
manage-offline-container-images.sh register
# generate_list.sh
This script generates the list of files to download and the list of container images from the `roles/download/defaults/main.yml` file.
./generate_list.sh
cat temp/files.list
# Download the files with wget
wget -x -P temp/files -i temp/files.list
# All container images that may be needed
cat temp/images.list
tar -czvf kubespray-2.18.0-cache.tar.gz /tmp/kubespray_cache/
tar -czvf kubespray-2.18.0-releases.tar.gz /tmp/releases/
# Extract on the offline/target machine
cd /
tar -xzvf /tmp/kubespray-2.18.0-cache.tar.gz
tar -xzvf /tmp/kubespray-2.18.0-releases.tar.gz
# Access via kubectl proxy
kubectl proxy --address='0.0.0.0' --accept-hosts='^*$'
#kubernetes-dashboard
http://10.0.70.251:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#/login
# Access via NodePort
kubectl edit svc/kubernetes-dashboard -n kube-system
type: NodePort
# Check the assigned NodePort
kubectl get svc kubernetes-dashboard -n kube-system
https://10.0.70.251:32690/
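As a non-interactive alternative to the edit above (same namespace and service name assumed), the service type can also be switched with a patch:
kubectl -n kube-system patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'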
# Access via ingress-nginx
kubectl apply -f ingress-nginx-kubernetes-dashboard.yaml
kubectl get ingress -n kube-system
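The ingress-nginx-kubernetes-dashboard.yaml manifest is not shown above; a minimal sketch of what it might contain follows. The host name is a placeholder, and the backend-protocol annotation is used because the dashboard service serves HTTPS:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  ingressClassName: nginx
  rules:
  - host: dashboard.example.local   # placeholder host name
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kubernetes-dashboard
            port:
              number: 443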
The dashboard gives an overview of the Kubernetes cluster. To access the dashboard page, additional RBAC objects are required:
tee admin-user.yaml <<-'EOF'
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
EOF
tee admin-user-role.yaml <<-'EOF'
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF
Create the ServiceAccount and ClusterRoleBinding:
kubectl apply -f admin-user.yaml && kubectl apply -f admin-user-role.yaml
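On Kubernetes 1.22 the ServiceAccount still gets an auto-generated token Secret (this changed in 1.24), so the dashboard login token can be read with something like:
kubectl -n kube-system get secret $(kubectl -n kube-system get sa admin-user -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 -d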
You can add worker nodes to the cluster by running the scale playbook; see "Adding nodes" for more information. You can remove worker nodes from the cluster by running the remove-node playbook.
You can upgrade the cluster by running the upgrade-cluster playbook.
You can reset the nodes and wipe out everything installed by Kubespray with the reset playbook. Example invocations are sketched below.
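A sketch of the corresponding invocations, using the same inventory as above (the node name passed to remove-node is a placeholder):
# Add new worker nodes (add them to hosts.yaml first)
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root scale.yml
# Remove a node (replace n3 with the node to drain and delete)
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root remove-node.yml -e node=n3
# Upgrade the cluster
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root upgrade-cluster.yml
# Reset the nodes and remove everything installed by Kubespray
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root reset.yml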
The default Kubernetes NodePort range is 30000-32767. To simplify later use, such as ports 80 and 443 for ingress, it is recommended to open the full port range. Once the full range is open, take care to avoid well-known ports such as 8080. This change must be made on every master node.
vi /etc/kubernetes/manifests/kube-apiserver.yaml
--service-node-port-range=1-65535
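After saving, the kubelet recreates the kube-apiserver static pod automatically. A quick way to confirm the new flag is active (component=kube-apiserver is the label kubeadm sets on the static pod, and the pod name is suffixed with the node name, n1 here):
kubectl -n kube-system get pods -l component=kube-apiserver
kubectl -n kube-system get pod kube-apiserver-n1 -o yaml | grep service-node-port-range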
# kubectl proxy creates a proxy between your machine and the Kubernetes API; by default it is only reachable locally (from the machine that started it).
# Start the proxy with:
$ kubectl proxy
Starting to serve on 127.0.0.1:8001
# You can also use the --address and --accept-hosts flags to allow external access:
kubectl proxy --address='0.0.0.0' --accept-hosts='^*$'
#kubernetes-dashboard
http://10.0.70.251:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#/login
kubectl get pod <pod-name> -o yaml              # check whether the Pod spec is correct
kubectl describe pod <pod-name>                 # view the Pod's events
kubectl logs <pod-name> [-c <container-name>]   # view container logs
# Example:
kubectl logs kubernetes-dashboard-548847967d-6rfh9 -n kube-system
# The root cause usually requires the kubelet logs; filter journal entries by unit
journalctl -u kubelet
# Follow the log in real time
journalctl -f -u kubelet
# Show only the latest n lines
journalctl -u kubelet -n 200
# Filter by log level; the priorities are:
# 0: emerg
# 1: alert
# 2: crit
# 3: err
# 4: warning
# 5: notice
# 6: info
# 7: debug
journalctl -u kubelet -p err