
Installing a Kubernetes Cluster with kubeadm

钮安歌
2023-12-01

A walkthrough of installing a Kubernetes cluster with kubeadm, plus a standalone Harbor registry.

Environment

Server                                      IP address         Components
k8s-master (2C/4G; at least 2 CPU cores)    192.168.5.33/24    docker, kubeadm, kubectl, kubelet, cni
k8s-node1 (2C/2G)                           192.168.5.34/24    docker, kubeadm, kubectl, kubelet, cni
k8s-node2 (2C/2G)                           192.168.5.35/24    docker, kubeadm, kubectl, kubelet, cni

Run on all nodes

# On all nodes: disable the firewall, SELinux, and swap
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
swapoff -a		# swap must be off for kubelet to run
sed -ri 's/.*swap.*/#&/' /etc/fstab	# disable swap permanently; in sed, & stands for the matched text
# Load the ip_vs kernel modules (used by kube-proxy in IPVS mode)
for i in $(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs|grep -o "^[^.]*");do echo $i; /sbin/modinfo -F filename $i >/dev/null 2>&1 && /sbin/modprobe $i;done
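
To confirm the modules actually loaded, a quick check (a verification step added here, not part of the original procedure):

lsmod | grep -e ip_vs -e nf_conntrack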

Set the hostnames and the hosts file

hostnamectl set-hostname k8s-master	# on the master
hostnamectl set-hostname k8s-node1	# on node1
hostnamectl set-hostname k8s-node2	# on node2
--------------------------------------
cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.5.33 k8s-master
192.168.5.34 k8s-node1
192.168.5.35 k8s-node2
-----------------------
scp /etc/hosts k8s-node1:/etc/hosts
scp /etc/hosts k8s-node2:/etc/hosts
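
A quick sanity check that the names now resolve from the master (verification only):

ping -c 1 k8s-node1 && ping -c 1 k8s-node2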

Tune kernel parameters

# Adjust kernel parameters

cat > /etc/sysctl.d/kubernetes.conf << EOF
# Pass bridged traffic to the iptables chains
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
# Disable IPv6
net.ipv6.conf.all.disable_ipv6=1
net.ipv4.ip_forward=1
EOF

# Apply the parameters
sysctl --system
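
On a minimal CentOS 7 install, the net.bridge.* keys only exist once the br_netfilter kernel module is loaded, so sysctl --system may report "No such file or directory" for them. If that happens, loading the module and persisting it across reboots is the usual fix:

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl --system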

Prepare the package mirrors

centos

curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo

docker-ce

# Step 1: install the required system tools
yum install -y yum-utils device-mapper-persistent-data lvm2
# Step 2: add the repository definition
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Step 3: point the repository at the Aliyun mirror
sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
# Step 4: refresh the cache and install Docker CE
yum makecache fast
yum -y install docker-ce
----------------------------------------
mkdir /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}
EOF
# Use the systemd cgroup driver for resource control: compared with cgroupfs, systemd's handling of CPU and memory limits is simpler and more mature.
# Logs use the json-file driver capped at 100M; container logs land under /var/log/containers, which makes them easy for ELK-style collectors to pick up.

systemctl daemon-reload
systemctl restart docker.service
systemctl enable docker.service 

docker info | grep "Cgroup Driver"
Cgroup Driver: systemd

kubernetes

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
setenforce 0
yum install -y kubelet-1.20.15 kubeadm-1.20.15 kubectl-1.20.15
systemctl enable kubelet && systemctl start kubelet
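
kubelet will restart in a loop until kubeadm init (or kubeadm join) has run, so an "activating (auto-restart)" status at this point is normal. To double-check the pinned versions:

kubelet --version
kubeadm version -o short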

Deploy the K8S cluster

# List the images needed for initialization
[root@k8s-master ~]# kubeadm config images list
I0805 15:35:34.093193    2139 version.go:254] remote version is much newer: v1.24.3; falling back to: stable-1.20
k8s.gcr.io/kube-apiserver:v1.20.15
k8s.gcr.io/kube-controller-manager:v1.20.15
k8s.gcr.io/kube-scheduler:v1.20.15
k8s.gcr.io/kube-proxy:v1.20.15
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns:1.7.0

Generate the init config file and edit it

kubeadm config print init-defaults > init-config.yaml
---
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.5.33    # change to the master's own IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers         # switch to the Aliyun mirror
kind: ClusterConfiguration
kubernetesVersion: v1.20.15                      # match the installed kubelet version
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16              # add this line (pod network CIDR)
scheduler: {}

-----
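
Optionally, since the ip_vs modules were loaded earlier, kube-proxy can be switched from its default iptables mode to IPVS by appending one more document to the same config file; a minimal sketch (not part of the original steps):

cat >> init-config.yaml <<EOF
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF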

List and pull the required images

kubeadm config images list --config init-config.yaml
[root@k8s-master k8s]# kubeadm config images pull --config=init-config.yaml
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.20.15
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.20.15
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.20.15
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.20.15
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.2
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.4.13-0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:1.7.0
[root@k8s-master k8s]# docker images
REPOSITORY                                                        TAG        IMAGE ID       CREATED         SIZE
registry.aliyuncs.com/google_containers/kube-proxy                v1.20.15   46e2cd1b2594   6 months ago    99.7MB
registry.aliyuncs.com/google_containers/kube-controller-manager   v1.20.15   d6296d0e06d2   6 months ago    116MB
registry.aliyuncs.com/google_containers/kube-scheduler            v1.20.15   9155e4deabb3   6 months ago    47.3MB
registry.aliyuncs.com/google_containers/kube-apiserver            v1.20.15   323f6347f5e2   6 months ago    122MB
registry.aliyuncs.com/google_containers/etcd                      3.4.13-0   0369cf4303ff   23 months ago   253MB
registry.aliyuncs.com/google_containers/coredns                   1.7.0      bfe3a36ebd25   2 years ago     45.2MB
registry.aliyuncs.com/google_containers/pause                     3.2        80d28bedfe5d   2 years ago     683kB

Initialize the cluster

[root@k8s-master k8s]# kubeadm init --config=init-config.yaml
....
Your Kubernetes control-plane has initialized successfully!
....
kubeadm join 192.168.5.33:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:790af2eeac10e22ab5660bf102ec93c382d9b8c7480addc3fecdfc2251cfc092
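
Save this kubeadm join command for the worker nodes. The bootstrap token expires after 24h (the ttl set in init-config.yaml); if it has expired by the time a node joins, print a fresh join command on the master:

kubeadm token create --print-join-command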

Join the nodes

[root@k8s-node1 ~]# kubeadm join 192.168.5.33:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:790af2eeac10e22ab5660bf102ec93c382d9b8c7480addc3fecdfc2251cfc092
[preflight] Running pre-flight checks
        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.17. Latest validated version: 19.03
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

Set up kubectl

# kubectl must be authenticated and authorized by the API server before it can perform management operations. A kubeadm-deployed cluster generates an admin kubeconfig, /etc/kubernetes/admin.conf, which kubectl loads from the default path $HOME/.kube/config.
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
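
For a one-off root session, exporting the admin kubeconfig directly works as well, as an alternative to copying the file:

export KUBECONFIG=/etc/kubernetes/admin.conf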

Check the nodes

A kubeadm-initialized cluster does not include a network plugin, so right after init the cluster has no working pod networking: on k8s-master every node shows as NotReady, and the CoreDNS Pods cannot serve requests.

[root@k8s-master k8s]# kubectl get nodes
NAME         STATUS     ROLES                  AGE     VERSION
k8s-master   NotReady   control-plane,master   3m41s   v1.20.15
k8s-node1    NotReady   <none>                 2m53s   v1.20.15
k8s-node2    NotReady   <none>                 2m54s   v1.20.15
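
The same symptom shows at the Pod level: until a CNI plugin is in place, the CoreDNS Pods sit in Pending. A quick way to confirm (verification only):

kubectl get pods -n kube-system -o wide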

Install the network plugin

[root@k8s-master ~]#docker pull quay.io/coreos/flannel:v0.9.1-amd64
[root@k8s-master ~]#mkdir -p /etc/cni/net.d/
[root@k8s-master ~]#cat <<EOF> /etc/cni/net.d/10-flannel.conf
{"name":"cbr0","type":"flannel","delegate": {"isDefaultGateway": true}}
EOF
[root@k8s-master ~]#mkdir /usr/share/oci-umount/oci-umount.d -p
[root@k8s-master ~]#mkdir /run/flannel/
# FLANNEL_NETWORK/SUBNET must match the podSubnet (10.244.0.0/16) set in init-config.yaml
[root@k8s-master ~]#cat <<EOF> /run/flannel/subnet.env
FLANNEL_NETWORK=10.244.0.0/16
FLANNEL_SUBNET=10.244.1.0/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=true
EOF
scp -r /etc/cni k8s-node1:/etc
[root@k8s-node1 ~]# systemctl restart kubelet
scp -r /etc/cni k8s-node2:/etc
[root@k8s-node2 ~]# systemctl restart kubelet
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES                  AGE   VERSION
k8s-master   Ready    control-plane,master   23m   v1.20.15
k8s-node1    Ready    <none>                 22m   v1.20.15
k8s-node2    Ready    <none>                 22m   v1.20.15
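
Note that the steps above only hand-write the CNI config and subnet file, which is enough to bring the nodes to Ready but does not actually run the flannel daemon. The more common approach is to apply the upstream manifest on the master (URL from flannel's coreos-era releases; it may have moved since):

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml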

Deploy Harbor

Prerequisites

Harbor requires docker and docker-compose; install both on the Harbor host first (see the sketch below).
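
A minimal sketch for installing docker-compose from the GitHub releases (the 1.27.4 version here is only an example; any recent release works):

curl -L https://github.com/docker/compose/releases/download/1.27.4/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose --version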

[root@harbor ~]# wget https://github.com/goharbor/harbor/releases/download/v2.1.1/harbor-offline-installer-v2.1.1.tgz
[root@harbor ~]# tar xf harbor-offline-installer-v2.1.1.tgz
[root@harbor ~]# cd harbor/
[root@harbor harbor]# cp harbor.yml.tmpl harbor.yml
[root@harbor harbor]# vim harbor.yml
-----------------------------------------------------------------------------------------------
# Configuration file of Harbor

# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: 192.168.5.210    # the Harbor host's IP

# http related config
http:
  # port for http, default is 80. If https enabled, this port will redirect to https port
  port: 5000    # change the HTTP port

# https related config
# https:						# HTTPS can be left disabled on a trusted LAN
  # https port for harbor, default is 443
  # port: 443
  # The path of cert and key files for nginx
  # certificate: /your/certificate/path
  # private_key: /your/private/key/path

# # Uncomment following will enable tls communication between all harbor components
# internal_tls:
#   # set enabled to true means internal tls is enabled
#   enabled: true
#   # put your cert and key files on dir
#   dir: /etc/harbor/tls/internal

# Uncomment external_url if you want to enable external proxy
# And when it enabled the hostname will no longer used
# external_url: https://reg.mydomain.com:8433

# The initial password of Harbor admin
# It only works in first time to install harbor
# Remember Change the admin password from UI after launching Harbor.
harbor_admin_password: harbor                 # initial admin login password

# Harbor DB configuration          # the defaults can stay; do not comment this section out or the install will fail
database:
  # The password for the root user of Harbor DB. Change this before any production use.
  password: root123
  # The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
  max_idle_conns: 50
  # The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
  # Note: the default number of connections is 1024 for postgres of harbor.
  max_open_conns: 1000

# The default data volume
data_volume: /harbor/data   		# where Harbor stores its data
....
--------------------------------------------------
[root@harbor harbor]# ./install.sh
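
Because Harbor is serving plain HTTP here, every Docker client that will push or pull from it (including the k8s nodes) must trust it as an insecure registry. A sketch of the client-side change, merged into the daemon.json written earlier:

cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "insecure-registries": ["192.168.5.210:5000"]
}
EOF
systemctl daemon-reload && systemctl restart docker
docker login 192.168.5.210:5000 -u admin -p harbor	# password set by harbor_admin_password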