
Setting up a GlusterFS cluster on k8s and installing nfs-ganesha

景明诚
2023-12-01

I. Deploying the GlusterFS cluster

1. Deploy GlusterFS as a DaemonSet. Adjust the data directory to your own environment; here a directory /data/glusterfs is created on every server and used as the actual GlusterFS storage directory. The manifest schedules pods onto nodes labeled storagenode=glusterfs; commands for labeling the nodes and applying the manifest follow right after it.

# glusterfs
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: glusterfs
  labels:
    glusterfs: daemonset
  annotations:
    description: GlusterFS DaemonSet
    tags: glusterfs
spec:
  selector:
    matchLabels:
      glusterfs-node: pod
  template:
    metadata:
      name: glusterfs
      labels:
        glusterfs-node: pod
    spec:
      nodeSelector:
        storagenode: glusterfs
      hostNetwork: true
      containers:
      - image: gluster/gluster-centos:latest
        name: glusterfs
        volumeMounts:
        # data directory mount point
        - name: glusterfs-data
          mountPath: "/data"
        - name: glusterfs-heketi
          mountPath: "/var/lib/heketi"
        - name: glusterfs-run
          mountPath: "/run"
        - name: glusterfs-lvm
          mountPath: "/run/lvm"
        - name: glusterfs-etc
          mountPath: "/etc/glusterfs"
        - name: glusterfs-logs
          mountPath: "/var/log/glusterfs"
        - name: glusterfs-config
          mountPath: "/var/lib/glusterd"
        - name: glusterfs-dev
          mountPath: "/dev"
        - name: glusterfs-misc
          mountPath: "/var/lib/misc/glusterfsd"
        - name: glusterfs-cgroup
          mountPath: "/sys/fs/cgroup"
          readOnly: true
        - name: glusterfs-ssl
          mountPath: "/etc/ssl"
          readOnly: true
        securityContext:
          capabilities: {}
          privileged: true
        readinessProbe:
          timeoutSeconds: 3
          initialDelaySeconds: 60
          exec:
            command:
            - "/bin/bash"
            - "-c"
            - systemctl status glusterd.service
        livenessProbe:
          timeoutSeconds: 3
          initialDelaySeconds: 60
          exec:
            command:
            - "/bin/bash"
            - "-c"
            - systemctl status glusterd.service
      volumes:
      - name: glusterfs-heketi
        hostPath:
          path: "/var/lib/heketi"
      - name: glusterfs-run
        emptyDir: {}
      - name: glusterfs-lvm
        hostPath:
          path: "/run/lvm"
      - name: glusterfs-etc
        hostPath:
          path: "/etc/glusterfs"
      - name: glusterfs-logs
        hostPath:
          path: "/var/log/glusterfs"
      - name: glusterfs-config
        hostPath:
          path: "/var/lib/glusterd"
      - name: glusterfs-dev
        hostPath:
          path: "/dev"
      - name: glusterfs-misc
        hostPath:
          path: "/var/lib/misc/glusterfsd"
      - name: glusterfs-cgroup
        hostPath:
          path: "/sys/fs/cgroup"
      - name: glusterfs-ssl
        hostPath:
          path: "/etc/ssl"
      # this is a directory on the host server; change it as needed
      - name: glusterfs-data
        hostPath:
          path: "/data/glusterfs"
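
Assuming the manifest above is saved as glusterfs-daemonset.yaml (the file name here is just an example), the storage nodes can be labeled and the DaemonSet applied roughly as follows; replace NODE_NAME with each node that should run a GlusterFS pod:

kubectl label node NODE_NAME storagenode=glusterfs
kubectl apply -f glusterfs-daemonset.yaml
kubectl get pods -l glusterfs-node=pod -o wide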

2. The DaemonSet runs with host networking, so you can check directly on each server whether port 24007 is listening. Once glusterd is up, exec into one of the containers and add the other nodes to the trusted storage pool, as sketched below.
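
For example, the port can be checked on any server and a shell opened in one of the GlusterFS pods like this (the pod name is a placeholder; pick one from the kubectl get pods output):

ss -tnlp | grep 24007
kubectl get pods -l glusterfs-node=pod -o wide
kubectl exec -it GLUSTERFS_POD_NAME -- /bin/bash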

Probe one peer for every additional node in the cluster:

gluster peer probe 1.2.3.4
peer probe: success

Check the peer status:

gluster peer status

Create a volume. These servers have no separate data disks, so the /data directory is used for the bricks; in production, mount dedicated data disks instead.

Distributed-replicated mode (a combined type) needs at least four servers, and every host:/path brick in the list must be unique. The force parameter at the end is required here because the bricks sit on the root filesystem; without it the command fails.

gluster volume create data-volume replica 2 10.102.23.41:/data 10.102.23.44:/data 10.102.23.42:/data 10.102.23.43:/data 10.102.23.44:/data 10.102.23.4:/data 10.102.23.45:/data 10.102.23.46:/data force

Start the newly created volume:

gluster volume start data-volume

Inspect the created volume:

gluster volume info

Check the status of the distributed volume:

gluster volume status
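
Since the volume will be exported through nfs-ganesha in the next step, it is worth making sure Gluster's built-in NFS server (gNFS) is not exporting it as well; on GlusterFS 6 gNFS is disabled by default, so this is only a safeguard:

gluster volume set data-volume nfs.disable on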

3. Setting up nfs-ganesha

Install nfs-ganesha and its Gluster FSAL plug-in from the CentOS Gluster 6 repository on the node that will export the volume (10.102.23.44 here):

yum -y install centos-release-gluster6

yum -y install nfs-ganesha nfs-ganesha-gluster

[root@admin-node ~]# vim /etc/ganesha/ganesha.conf

.....................................
EXPORT
{
    ## Export Id (mandatory, each EXPORT must have a unique Export_Id)
    #Export_Id = 12345;
    Export_Id = 10;

    ## Exported path (mandatory)
    #Path = /nonexistant;
    Path = /data01;

    ## Pseudo Path (required for NFSv4 or if mount_path_pseudo = true)
    #Pseudo = /nonexistant;
    Pseudo = /data01;            # root path that NFS clients mount

    ## Restrict the protocols that may use this export. This cannot allow
    ## access that is denied in NFS_CORE_PARAM.
    #Protocols = 3,4;
    Protocols = 4;               # NFS version offered to clients

    ## Access type for clients. Default is None, so some access must be
    ## given. It can be here, in the EXPORT_DEFAULTS, or in a CLIENT block
    #Access_Type = RW;
    Access_Type = RW;            # read/write access

    ## Whether to squash various users.
    #Squash = root_squash;
    Squash = No_root_squash;     # do not squash root

    ## Allowed security types for this export
    #Sectype = sys,krb5,krb5i,krb5p;
    Sectype = sys;               # security flavor

    ## Exporting FSAL
    #FSAL {
    #    Name = VFS;
    #}
    FSAL {
        Name = GLUSTER;
        hostname = "10.102.23.44";   # IP of a GlusterFS management node
        volume = "data-volume";      # GlusterFS volume name
    }
}
...................

[root@admin-node ~]# systemctl restart nfs-ganesha
[root@admin-node ~]# systemctl enable nfs-ganesha
[root@admin-node ~]# showmount -e 10.102.23.44
Export list for 10.102.23.44:    # the server answers, so nfs-ganesha is up
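
An empty export list from showmount is not unusual here, since showmount speaks the NFSv3 MOUNT protocol while only NFSv4 is exported; as an extra check, confirm that ganesha is running and listening on the standard NFS port 2049:

systemctl status nfs-ganesha
ss -tnlp | grep 2049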
 

4. Mounting on the client

Mount using the GlusterFS native client:

mkdir /logs
mount -t glusterfs 10.102.23.44:data-volume /logs
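
To make the GlusterFS mount persist across reboots, an /etc/fstab entry along these lines can be added (adjust the server address and mount point to your setup):

10.102.23.44:data-volume  /logs  glusterfs  defaults,_netdev  0 0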

Mount over NFS:

yum -y install nfs-utils rpcbind

systemctl start rpcbind
systemctl enable rpcbind

mkdir -p /home/dwweiyinwen/logs/
mount -t nfs -o vers=4,proto=tcp,port=2049 10.102.23.44:/data01 /home/dwweiyinwen/logs/
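
Similarly, a matching /etc/fstab entry for the NFS mount might look like this (a sketch; adjust the paths to your environment):

10.102.23.44:/data01  /home/dwweiyinwen/logs  nfs4  proto=tcp,port=2049,_netdev  0 0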
