KubeSphere Multi-Node Installation

斜宁
2023-12-01

Host Inventory

No.  IP               Hostname     Spec
1    192.168.85.161   k8s-master   8 vCPU / 32 GB
2    192.168.85.162   k8s-node1    8 vCPU / 32 GB
3    192.168.85.163   k8s-node2    8 vCPU / 32 GB
4    192.168.85.164   nfs-server   8 vCPU / 32 GB

Prepare the Base Environment

NFS server installation and deployment (omitted)
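
For reference, a minimal sketch of what the omitted NFS server setup might look like on 192.168.85.164, assuming a CentOS-family system and the /data export path that the nfs-client add-on references later in the cluster config:

# Run on nfs-server (192.168.85.164)
yum -y install nfs-utils
mkdir -p /data
# Export /data to the cluster subnet; tighten the options to your security requirements.
echo "/data 192.168.85.0/24(rw,sync,no_root_squash)" >> /etc/exports
systemctl enable --now rpcbind nfs-server
exportfs -arv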

1. Install Docker

curl -sSL https://get.daocloud.io/docker | sh
# Use a registry mirror to speed up image pulls
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io
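
The install script may not start the Docker service automatically; on a systemd-based system it can be enabled and sanity-checked as follows:

systemctl enable --now docker
docker version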

2. Install Dependencies

yum -y install socat conntrack ebtables ipset net-tools

3. Configure Passwordless SSH Between Nodes

# Run on all nodes
ssh-keygen
ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.85.161
ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.85.162
ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.85.163
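
A quick way to confirm that passwordless login works from the master node (IPs per the inventory above):

for ip in 192.168.85.161 192.168.85.162 192.168.85.163; do
  ssh -o BatchMode=yes root@$ip hostname
done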

4. Download KubeKey

# Run on the master node only
export KKZONE=cn
curl -sfL https://get-kk.kubesphere.io | VERSION=v2.0.0 sh -
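
The script places a kk binary in the current directory. If it is not already executable, set the execute bit and confirm the version:

chmod +x kk
./kk version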

Create the Cluster

1. Create a Sample Configuration File

./kk create config --with-kubesphere
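
By default this writes config-sample.yaml to the current directory, which is the file edited and used below. If you want to pin versions when generating the file, KubeKey also accepts them on the command line (an optional variant, per KubeKey v2.0.0 usage):

./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1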

2. Edit the Configuration File

Enable the optional components below according to your usage requirements and the resources available on your servers.

apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:   # host inventory
  - {name: k8s-master, address: 192.168.85.161, internalAddress: 192.168.85.161, privateKeyPath: "~/.ssh/id_rsa"}
  - {name: k8s-node1, address: 192.168.85.162, internalAddress: 192.168.85.162, privateKeyPath: "~/.ssh/id_rsa"}
  - {name: k8s-node2, address: 192.168.85.163, internalAddress: 192.168.85.163, privateKeyPath: "~/.ssh/id_rsa"}
  roleGroups:
    etcd:    # etcd node list
    - k8s-master  
    control-plane:    # control-plane node list
    - k8s-master  
    worker:    # worker node list
    - k8s-node1
    - k8s-node2
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers 
    # internalLoadbalancer: haproxy

    domain: lb.kubesphere.local
    address: ""
    port: 6443
  kubernetes:
    version: v1.21.5
    clusterName: cluster.local
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    multusCNI:
      enabled: false
  registry:
    plainHTTP: false
    privateRegistry: ""
    namespaceOverride: ""
    registryMirrors: []
    insecureRegistries: []
  addons: 
  - name: nfs-client    # use NFS as persistent storage
    namespace: kube-system
    sources: 
      chart: 
        name: nfs-client-provisioner
        repo: https://charts.kubesphere.io/main
        values:
        - storageClass.defaultClass=true
        - nfs.server=192.168.85.164
        - nfs.path=/data

---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.2.1
spec:
  persistence:
    storageClass: ""
  authentication:
    jwtSecret: ""
  local_registry: ""
  namespace_override: ""
  # dev_tag: ""
  etcd:
    monitoring: true  # enable as needed
    endpointIps: localhost
    port: 2379
    tlsEnable: true
  common:
    core:
      console:
        enableMultiLogin: true
        port: 30880
        type: NodePort
    # apiserver:
    #  resources: {}
    # controllerManager:
    #  resources: {}
    redis:
      enabled: true  # enable as needed
      volumeSize: 2Gi
    openldap:
      enabled: true  # enable as needed
      volumeSize: 2Gi
    minio:
      volumeSize: 20Gi
    monitoring:
      # type: external
      endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
      GPUMonitoring:
        enabled: true  # enable as needed
    gpu:
      kinds:         
      - resourceName: "nvidia.com/gpu"
        resourceType: "GPU"
        default: true
    es:
      # master:
      #   volumeSize: 4Gi
      #   replicas: 1
      #   resources: {}
      # data:
      #   volumeSize: 20Gi
      #   replicas: 1
      #   resources: {}
      logMaxAge: 7
      elkPrefix: logstash
      basicAuth:
        enabled: false  # whether to enable basic authentication
        username: ""
        password: ""
      externalElasticsearchHost: ""
      externalElasticsearchPort: ""
  alerting:
    enabled: true  # enable as needed
    # thanosruler:
    #   replicas: 1
    #   resources: {}
  auditing:
    enabled: true  # enable as needed
    # operator:
    #   resources: {}
    # webhook:
    #   resources: {}
  devops:
    enabled: true  # enable as needed
    jenkinsMemoryLim: 2Gi
    jenkinsMemoryReq: 1500Mi
    jenkinsVolumeSize: 8Gi
    jenkinsJavaOpts_Xms: 512m
    jenkinsJavaOpts_Xmx: 512m
    jenkinsJavaOpts_MaxRAM: 2g
  events:
    enabled: true
    # operator:
    #   resources: {}
    # exporter:
    #   resources: {}
    # ruler:
    #   enabled: true
    #   replicas: 2
    #   resources: {}
  logging:
    enabled: true
    containerruntime: docker
    logsidecar:
      enabled: true
      replicas: 2
      # resources: {}
  metrics_server:
    enabled: true
  monitoring:
    storageClass: ""
    # kube_rbac_proxy:
    #   resources: {}
    # kube_state_metrics:
    #   resources: {}
    # prometheus:
    #   replicas: 1
    #   volumeSize: 20Gi
    #   resources: {}
    #   operator:
    #     resources: {}
    #   adapter:
    #     resources: {}
    # node_exporter:
    #   resources: {}
    # alertmanager:
    #   replicas: 1
    #   resources: {}
    # notification_manager:
    #   resources: {}
    #   operator:
    #     resources: {}
    #   proxy:
    #     resources: {}
    gpu:
      nvidia_dcgm_exporter:
        enabled: true
        # resources: {}
  multicluster:
    clusterRole: none 
  network:
    networkpolicy:
      enabled: true
    ippool:
      type: none
    topology:
      type: none
  openpitrix:
    store:
      enabled: false
  servicemesh:
    enabled: true
  kubeedge:
    enabled: false   
    cloudCore:
      nodeSelector: {"node-role.kubernetes.io/worker": ""}
      tolerations: []
      cloudhubPort: "10000"
      cloudhubQuicPort: "10001"
      cloudhubHttpsPort: "10002"
      cloudstreamPort: "10003"
      tunnelPort: "10004"
      cloudHub:
        advertiseAddress:
          - ""
        nodeLimit: "100"
      service:
        cloudhubNodePort: "30000"
        cloudhubQuicNodePort: "30001"
        cloudhubHttpsNodePort: "30002"
        cloudstreamNodePort: "30003"
        tunnelNodePort: "30004"
    edgeWatcher:
      nodeSelector: {"node-role.kubernetes.io/worker": ""}
      tolerations: []
      edgeWatcherAgent:
        nodeSelector: {"node-role.kubernetes.io/worker": ""}
        tolerations: []

3. Create the Cluster from the Configuration File

./kk create cluster -f config-sample.yaml
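
Cluster creation takes a while. If you need to re-check the installer's progress after kk finishes its own output, the ks-installer log can be followed with the command from the KubeSphere documentation:

kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f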

4. Verify the Installation

When the installation completes, you will see output similar to the following:

#####################################################
###              Welcome to KubeSphere!           ###
#####################################################

Console: http://192.168.85.161:30880
Account: admin
Password: P@88w0rd

NOTES:
  1. After you log into the console, please check the
     monitoring status of service components in
     "Cluster Management". If any service is not
     ready, please wait patiently until all components
     are up and running.
  2. Please change the default password after login.

#####################################################
https://kubesphere.io             2022-07-28 03:10:29
#####################################################
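
Before logging in to the console, it is worth confirming that all nodes are Ready and that the workloads have come up:

kubectl get nodes
kubectl get pods -A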

Install OpenELB

1. Install

kubectl apply -f https://raw.githubusercontent.com/openelb/openelb/master/deploy/openelb.yaml
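
The manifest installs OpenELB into the openelb-system namespace; wait for its pods to be Running before continuing:

kubectl get pods -n openelb-system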

2. Enable strictARP

strictARP must be enabled for kube-proxy so that the network interfaces in the Kubernetes cluster stop answering ARP requests from other NICs and OpenELB handles those ARP requests instead.

kubectl edit configmap kube-proxy -n kube-system
...
ipvs:
  strictARP: true
...
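
If you prefer a non-interactive change, the same edit can be scripted (a sketch; review the resulting ConfigMap before restarting kube-proxy):

kubectl get configmap kube-proxy -n kube-system -o yaml \
  | sed -e 's/strictARP: false/strictARP: true/' \
  | kubectl apply -n kube-system -f -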

Then run the following command to restart the kube-proxy component:

kubectl rollout restart daemonset kube-proxy -n kube-system

3. Configure an EIP Pool

apiVersion: network.kubesphere.io/v1alpha2
kind: Eip
metadata:
  name: eip-pool
  annotations:
    eip.openelb.kubesphere.io/is-default-eip: "true"
spec:
  address: 192.168.85.91-192.168.85.100  # address pool
  protocol: layer2  # protocol: bgp, layer2, or vip
  interface: eth0  # network interface the addresses are announced on
  disable: false
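
Save the manifest (the filename eip-pool.yaml below is just an example) and apply it; once the CRD exists, the pool's status can be inspected with kubectl:

kubectl apply -f eip-pool.yaml
kubectl get eip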

4. How to Use It

1. Create a Deployment

  1. Create the YAML file:
vi layer2-openelb.yaml
  2. Add the following content:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: layer2-openelb
spec:
  replicas: 2
  selector:
    matchLabels:
      app: layer2-openelb
  template:
    metadata:
      labels:
        app: layer2-openelb
    spec:
      containers:
        - image: luksa/kubia
          name: kubia
          ports:
            - containerPort: 8080
  3. Apply the YAML file to create the Deployment:
kubectl apply -f layer2-openelb.yaml

2. Create a Service

  1. Create the YAML file:
vi layer2-svc.yaml
  2. Add the following content:
kind: Service
apiVersion: v1
metadata:
  name: layer2-svc
  annotations:  # the following three annotations are required
    lb.kubesphere.io/v1alpha1: openelb
    protocol.openelb.kubesphere.io/v1alpha1: layer2
    eip.openelb.kubesphere.io/v1alpha2: eip-pool  # name of the Eip address pool
spec:
  selector:
    app: layer2-openelb
  type: LoadBalancer
  ports:
    - name: http
      port: 80
      targetPort: 8080
  externalTrafficPolicy: Cluster
  3. Apply the YAML file to create the Service:
kubectl apply -f layer2-svc.yaml

3. Verify

[root@k8s-master ~]# kubectl get svc
NAME         TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)        AGE
kubernetes   ClusterIP      10.233.0.1      <none>          443/TCP        34m
layer2-svc   LoadBalancer   10.233.49.106   192.168.85.91   80:31929/TCP   4s

[root@k8s-master ~]# curl 192.168.85.91
You've hit layer2-openelb-7b4fdf6f85-nvsws