Ansible + K8S Deployment

从阎宝
2023-12-01

Prepare the nodes:

master     192.168.1.21    2 CPU, 2G RAM
node-0001  192.168.1.31    2 CPU, 2G RAM
node-0002  192.168.1.32    2 CPU, 2G RAM
node-0003  192.168.1.33    2 CPU, 2G RAM
registry   192.168.1.100   1 CPU, 1G RAM

Install Ansible on the master


vim install_ansible.sh

#!/bin/bash
#switch the yum repo to the Aliyun mirror
cd /etc/yum.repos.d/


#back up the stock repo file
mv CentOS-Base.repo CentOS-Base.repo.backup

#download the Aliyun repo file
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo

#ansible is shipped in EPEL on CentOS 7
yum -y install epel-release

yum clean all

yum -y install ansible



[root@master ~]#chmod +x install_ansible.sh
[root@master ~]#./install_ansible.sh
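
A quick sanity check that Ansible installed correctly:

[root@master ~]# ansible --version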

Set up passwordless SSH to every node
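
ssh-copy-id needs an existing key pair on the master; if there is none yet, generate one first (a minimal example, accepting an empty passphrase):

[root@master ~]# ssh-keygen -t rsa -N '' -f /root/.ssh/id_rsa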

[root@master ~]#ssh-copy-id root@192.168.1.31
[root@master ~]#ssh-copy-id root@192.168.1.32
[root@master ~]#ssh-copy-id root@192.168.1.33
[root@master ~]#ssh-copy-id root@192.168.1.100

cat /etc/hosts   # add the k8s node entries to /etc/hosts on the master

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.21 master
192.168.1.31 node-0001
192.168.1.32 node-0002
192.168.1.33 node-0003
192.168.1.100 registry

Add the k8s nodes to /etc/ansible/hosts on the master

[root@master ~]# vim /etc/ansible/hosts 
[k8s-all]
192.168.1.21
192.168.1.31
192.168.1.32
192.168.1.33
192.168.1.100
[master]
192.168.1.21
[nodes]
192.168.1.31
192.168.1.32
192.168.1.33
[registry]
192.168.1.100

[root@master ~]# ansible k8s-all -m ping
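
Each host should answer with a pong, roughly along these lines (exact output varies by Ansible version):

192.168.1.31 | SUCCESS => {
    "changed": false,
    "ping": "pong"
}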

Push /etc/hosts to the k8s cluster nodes

vim hosts_playbook.yml

---
- hosts: nodes
  remote_user: root

  tasks:
    - name: backup /etc/hosts
      shell: mv /etc/hosts /etc/host_bak
    - name: copy localhosts file to remote
      copy: src=/etc/hosts dest=/etc/ owner=root group=root mode=0644

[root@master ~]# ansible-playbook hosts_playbook.yml
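
To spot-check that the file actually landed on the nodes, an ad-hoc command such as the following can be used:

[root@master ~]# ansible nodes -m command -a 'cat /etc/hosts'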

Install the image registry (192.168.1.100)

vim install_registry.yml

---
- hosts: registry
  remote_user: root

  tasks:
    - name: install_registry
      yum: name=docker-distribution state=present
    - name: start registry
      shell: systemctl enable --now docker-distribution
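
The run command is not shown in the original notes; it would simply be the playbook above, followed by a probe of the registry API (the catalog stays empty until images are pushed):

[root@master ~]# ansible-playbook install_registry.yml
[root@master ~]# curl http://192.168.1.100:5000/v2/_catalog
{"repositories":[]}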
 

Copy the registry/myos directory from the cloud drive to the registry server

[root@registry ~]# cd myos
[root@registry ~]# chmod 755 init-img.sh
[root@registry ~]# ./init-img.sh
[root@registry ~]# curl http://192.168.1.100:5000/v2/myos/tags/list
{"name":"myos","tags":["nginx","php-fpm","v1804","httpd"]}

Install Docker

cat install_docker_playbook.yml

- hosts: k8s-all
  remote_user: root
  vars: 
    docker_version: 18.09.2

  tasks:
    - name: install dependencies
      shell:  yum install -y yum-utils device-mapper-persistent-data lvm2
    - name: docker-repo
      shell: yum-config-manager --add-repo https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo
    - name: install docker
      yum: name=docker-ce-{{docker_version}} state=present
    - name: start docker
      shell: systemctl start docker && systemctl enable docker

[root@master ~]# vim  /etc/ansible/ansible.cfg
deprecation_warnings = false  # around line 179; the default is true, set it to false to silence deprecation warnings
[root@master ~]# ansible-playbook install_docker_playbook.yml 
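
A quick ad-hoc check that Docker is installed on every host:

[root@master ~]# ansible k8s-all -m command -a 'docker --version'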

Disable SELinux, turn off swap, remove firewalld-*

cat before.sh

#!/bin/bash
#disable the firewall and set SELinux to permissive
systemctl disable firewalld
systemctl stop firewalld
setenforce 0

#disable swap
swapoff -a

#kernel parameters for bridged traffic
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

#reload the sysctl settings
sysctl --system

#configure the Kubernetes yum repo
cat <<'EOF' > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
       https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF

#refresh the cache
yum clean all -y && yum makecache -y
#install the packages
yum -y install kubelet kubeadm kubectl docker-ce --disableexcludes=kubernetes
#point docker at the private registry
mkdir -p /etc/docker
cat <<'EOF' > /etc/docker/daemon.json
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "registry-mirrors": ["https://hub-mirror.c.163.com"],
    "insecure-registries":["192.168.1.100:5000", "registry:5000"]
}
EOF

#enable and start docker and kubelet
systemctl enable --now docker kubelet


Import images into the private registry

Copy the images in kubernetes/v1.17.6/base-images from the cloud drive to the master

cat images.sh

cd base-images/
#load the images
for i in *.tar.gz;do docker load -i ${i};done

#tag and push to the private registry

docker images |awk '$2!="TAG"{print $1,$2}'|while read _f _v;do
    docker tag ${_f}:${_v} 192.168.1.100:5000/${_f##*/}:${_v}; 
    docker push 192.168.1.100:5000/${_f##*/}:${_v}; 
    docker rmi ${_f}:${_v}; 
done
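
After running images.sh on the master, the pushed repositories can be listed straight from the registry:

[root@master ~]# curl http://192.168.1.100:5000/v2/_catalog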

#enable bash tab completion for kubectl/kubeadm

kubectl completion bash >/etc/bash_completion.d/kubectl
kubeadm completion bash >/etc/bash_completion.d/kubeadm

#install the IPVS proxy packages
[root@master ~]#yum install -y ipvsadm ipset

[root@master ~]#vim /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1

[root@master ~]#modprobe br_netfilter

[root@master ~]#sysctl --system
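
To confirm the module is loaded and the sysctl keys took effect (all values should be 1):

[root@master ~]#lsmod | grep br_netfilter
[root@master ~]#sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward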

Deploy with kubeadm: prepare kubeadm-init.yaml

[root@master ~]#kubeadm config print init-defaults > init.default.yaml
[root@master ~]# mkdir init;cd init
[root@master ~]#cat kubeadm-init.yaml

apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.21
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: 192.168.1.100:5000
kind: ClusterConfiguration
kubernetesVersion: v1.17.6
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.254.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
[root@master ~]# cat deploy_master_playbook.yml
- hosts: master
  remote_user: root
  tasks: 
    - name: run the node prep script
      script: /root/before.sh
    - name: load and push the base images
      script: /root/images.sh
    - name: init k8s
      shell: kubeadm init --config=kubeadm-init.yaml |tee master-init.log
    - name: config kube
      shell: mkdir -p $HOME/.kube && cp -i /etc/kubernetes/admin.conf $HOME/.kube/config && chown $(id -u):$(id -g) $HOME/.kube/config
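
The original notes stop at the playbook definition; running it and verifying the control plane would look roughly like the following. The join token and CA cert hash printed by kubeadm (also retrievable later with kubeadm token create --print-join-command) are the values that go into the token/token_hash vars of node_install.yaml below:

[root@master ~]# ansible-playbook deploy_master_playbook.yml
[root@master ~]# kubectl get nodes
[root@master ~]# kubeadm token create --print-join-command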

Install the nodes

vim  yum.sh

#configure the Kubernetes yum repo
cat <<'EOF' > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
       https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF

#refresh the cache
yum clean all -y && yum makecache -y

vim node_install.yaml

---
- name: install and join the k8s worker nodes
  hosts:
  - nodes
  vars:
    master: '192.168.1.21:6443'
    token: 'fm6kui.mp8rr3akn74a3nyn'
    token_hash: 'sha256:f46dd7ee29faa3c096cad189b0f9aedf59421d8a881f7623a543065fa6b0088c'
  tasks:
  - name: disable swap
    lineinfile:
      path: /etc/fstab
      regexp: 'swap'
      state: absent
    notify: disable swap
  - name: Ensure SELinux is set to disabled mode
    lineinfile:
      path: /etc/selinux/config
      regexp: '^SELINUX='
      line: SELINUX=disabled
    notify: disable selinux
  - name: remove the firewalld
    yum:
      name:
      - firewalld 
      - firewalld-filesystem
      state: absent
  - name: install k8s node tools
    yum:
      name:
      - kubeadm
      - kubelet
      - docker-ce
      - ipvsadm
      - ipset
      state: present
      update_cache: yes
  - name: Create a directory if it does not exist
    file:
      path: /etc/docker
      state: directory
      mode: '0755'
  - name: Copy file with /etc/hosts
    copy:
      src: files/hosts
      dest: /etc/hosts
      owner: root
      group: root
      mode: '0644'
  - name: Copy file with /etc/docker/daemon.json
    copy:
      src: files/daemon.json
      dest: /etc/docker/daemon.json
      owner: root
      group: root
      mode: '0644'
  - name: Copy file with /etc/sysctl.d/k8s.conf
    copy:
      src: files/k8s.conf
      dest: /etc/sysctl.d/k8s.conf
      owner: root
      group: root
      mode: '0644'
    notify: enable sysctl args
  - name: enable k8s node service
    service:
      name: "{{ item }}"
      state: started
      enabled: yes
    with_items:
    - docker
    - kubelet
  - name: check node state
    stat:
      path: /etc/kubernetes/kubelet.conf
    register: result
  - name: node join
    shell: kubeadm join '{{ master }}' --token '{{ token }}' --discovery-token-ca-cert-hash '{{ token_hash }}'
    when: result.stat.exists == False
  handlers:
  - name: disable swap
    shell: swapoff -a
  - name: disable selinux
    shell: setenforce 0
  - name: enable sysctl args
    shell: sysctl --system
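
Run the playbook from the master (after filling in token and token_hash from the previous step), then confirm the nodes have registered:

[root@master ~]# ansible-playbook node_install.yaml
[root@master ~]# kubectl get nodes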

Install and configure the network plugin

To use Calico as the network plugin instead, its manifest can be applied directly:

[root@master ~]#kubectl apply -f "https://docs.projectcalico.org/manifests/calico.yaml"

The steps below use flannel served from the private registry instead.

Upload the flannel image to the private registry

Copy the kubernetes/v1.17.6/flannel directory from the cloud drive to the master

[root@master ~]# cd flannel
[root@master flannel]# docker load -i flannel.tar.gz
[root@master flannel]# docker tag quay.io/coreos/flannel:v0.12.0-amd64 192.168.1.100:5000/flannel:v0.12.0-amd64
[root@master flannel]# docker push 192.168.1.100:5000/flannel:v0.12.0-amd64
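
The pushed tag can be confirmed against the registry API:

[root@master flannel]# curl http://192.168.1.100:5000/v2/flannel/tags/list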



[root@master flannel]# vim kube-flannel.yml
line 128: "Network": "10.244.0.0/16",
line 172: image: 192.168.1.100:5000/flannel:v0.12.0-amd64
line 186: image: 192.168.1.100:5000/flannel:v0.12.0-amd64
line 227 to end: delete (the non-amd64 DaemonSets)
[root@master flannel]# kubectl apply -f kube-flannel.yml
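
Once the manifest is applied, the flannel pods should go Running and the nodes should turn Ready:

[root@master flannel]# kubectl -n kube-system get pods -l app=flannel
[root@master flannel]# kubectl get nodes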
For reference, the unmodified upstream kube-flannel.yml (before the edits above):

---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - amd64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - arm64
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-arm64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-arm64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - arm
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-arm
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-arm
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-ppc64le
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - ppc64le
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-ppc64le
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-ppc64le
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-s390x
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - s390x
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.12.0-s390x
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.12.0-s390x
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
             add: ["NET_ADMIN"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
