一. 环境规划:
主机名 | 主机地址 | 角色 | 磁盘 |
node1 | 192.168.188.11 192.168.188.100(VIP) | 主服务器 | 系统盘sda:20G 存储盘sdb:20G |
node2 | 192.168.188.12 | 备服务器 | 系统盘sda:20G 存储盘sdb:20G |
二. 部署配置DRBD:按照上一篇文章《共享存储DRBD的部署和配置》部署。
三. 安装配置NFS:
1. 安装NFS,主备节点操作一致:
[root@node1 ~]# yum install rpcbind nfs-utils -y
2. 配置并启动NFS:
[root@node1 ~]# vim /etc/exports
/data 192.168.0.0/16(rw,sync,no_root_squash,no_all_squash)
[root@node1 ~]# systemctl restart rpcbind
[root@node1 ~]# systemctl restart nfs
[root@node1 ~]# systemctl enable nfs-server.service
参数配置说明:
1. rw:表示客户端对/data有读写权限
2. sync:表示数据同步写入磁盘后才向客户端返回,避免断电丢数据
3. no_root_squash:表示不将客户端的root用户映射为匿名用户(保留root权限)
4. no_all_squash:表示不将客户端的普通用户映射为匿名用户
四. 安装并配置keepalived,主备节点都要操作:
1. 安装keepalived:
[root@node1 ~]# yum install keepalived -y
2. 配置keepalived配置文件:
##MASTER配置文件
[root@node1 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
! MASTER node (node1): owns the VIP 192.168.188.100 while the NFS check passes.
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL_NFS1
}
# Health check: "killall -0 nfsd" sends signal 0 (existence probe) to the
# nfsd processes every 2 seconds. On failure the node's priority drops by 40
# (100 - 40 = 60, below the backup's 80), which triggers VIP failover.
vrrp_script chk_nfs {
script "killall -0 nfsd"
interval 2
weight -40
}
vrrp_instance VI_1 {
state MASTER
interface ens33
virtual_router_id 51 # must be identical on both nodes
priority 100 # higher than the backup's 80
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script {
chk_nfs
}
notify_stop /etc/keepalived/notify_stop.sh # script run when keepalived stops
notify_master /etc/keepalived/notify_master.sh # script run on transition to MASTER
notify_backup /etc/keepalived/notify_backup.sh # script run on transition to BACKUP
virtual_ipaddress {
#192.168.188.100/24 dev # bind the VIP directly to the given NIC
192.168.188.100/24 dev ens33 label ens33:1 # bind the VIP to a labeled sub-interface of the NIC
}
}
##BACKUP配置文件:
[root@node2 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
! BACKUP node (node2): takes over the VIP when the master's priority drops.
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL_NFS2
}
# Same health check as the master: probe the nfsd processes every 2 seconds;
# a failure lowers this node's priority by 40 as well.
vrrp_script chk_nfs {
script "killall -0 nfsd"
interval 2
weight -40
}
vrrp_instance VI_1 {
state BACKUP
interface ens33
virtual_router_id 51 # must be identical on both nodes
priority 80 # lower than the master's 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script {
chk_nfs
}
notify_stop /etc/keepalived/notify_stop.sh # script run when keepalived stops
notify_master /etc/keepalived/notify_master.sh # script run on transition to MASTER
notify_backup /etc/keepalived/notify_backup.sh # script run on transition to BACKUP
virtual_ipaddress {
#192.168.188.100/24 dev # bind the VIP directly to the given NIC
192.168.188.100/24 dev ens33 label ens33:1
}
}
3. 配置从BACKUP切换到MASTER触发的脚本,主备脚本一致:
[root@node1 keepalived]# vim notify_master.sh
#!/bin/bash
# keepalived notify_master hook: promote this node to the active NFS server.
# Order matters: DRBD must be Primary before /dev/drbd0 can be mounted, and
# /data must be mounted before nfs-server exports it.
time=$(date +"%F %T")
logname=notify_master
logdir=/etc/keepalived/logs
drbddev=/dev/drbd0   # DRBD block device backing /data (was misnamed "mountdir")
resname=nfs          # DRBD resource name
logfile="$logdir/${logname}.log"

[ -d "$logdir" ] || mkdir -p "$logdir"
echo -e "$time ----${logname}----\n" >> "$logfile"

# Only mount and start NFS if the promotion succeeded; otherwise we would
# export an empty /data mount point to the clients.
if drbdadm primary "$resname" >> "$logfile" 2>&1; then
    mount "$drbddev" /data >> "$logfile" 2>&1        # attach the replicated filesystem
    systemctl start nfs-server >> "$logfile" 2>&1    # begin serving /data
else
    echo "drbdadm primary $resname failed; /data not mounted, NFS not started" >> "$logfile"
fi
echo -e "\n" >> "$logfile"
4. 配置从MASTER切换到BACKUP触发的脚本,主备脚本一致:
[root@node1 keepalived]# vim notify_backup.sh
#!/bin/bash
# keepalived notify_backup hook: demote this node to standby.
# Reverse order of notify_master: stop NFS first so /data can be unmounted,
# then unmount, then demote DRBD (demotion fails while the device is mounted).
time=$(date +"%F %T")
logname=notify_backup
logdir=/etc/keepalived/logs
resname=nfs          # DRBD resource name
logfile="$logdir/${logname}.log"

[ -d "$logdir" ] || mkdir -p "$logdir"
echo -e "$time ----${logname}----\n" >> "$logfile"

systemctl stop nfs-server >> "$logfile" 2>&1       # stop exporting /data
# Fall back to a lazy unmount if /data is still busy (e.g. lingering NFS
# handles); without this, "drbdadm secondary" below is guaranteed to fail.
umount /data >> "$logfile" 2>&1 || umount -l /data >> "$logfile" 2>&1
drbdadm secondary "$resname" >> "$logfile" 2>&1    # demote the DRBD resource
echo -e "\n" >> "$logfile"
5. 配置停止服务时触发的脚本,主备脚本一致:
[root@node1 keepalived]# vim notify_stop.sh
#!/bin/bash
# keepalived notify_stop hook: release NFS resources when keepalived exits.
# Same teardown sequence as notify_backup: stop NFS, unmount /data, then
# demote DRBD (demotion fails while the device is mounted).
time=$(date +"%F %T")
logname=notify_stop
logdir=/etc/keepalived/logs
resname=nfs          # DRBD resource name
logfile="$logdir/${logname}.log"

[ -d "$logdir" ] || mkdir -p "$logdir"
echo -e "$time ----${logname}----\n" >> "$logfile"

systemctl stop nfs-server >> "$logfile" 2>&1       # stop exporting /data
# Fall back to a lazy unmount if /data is still busy; otherwise the
# "drbdadm secondary" below is guaranteed to fail.
umount /data >> "$logfile" 2>&1 || umount -l /data >> "$logfile" 2>&1
drbdadm secondary "$resname" >> "$logfile" 2>&1    # demote the DRBD resource
echo -e "\n" >> "$logfile"
6. 为脚本添加执行权限,主备节点操作一致:
[root@node1 keepalived]# chmod +x notify_stop.sh
[root@node1 keepalived]# chmod +x notify_backup.sh
[root@node1 keepalived]# chmod +x notify_master.sh
7. 重启keepalived:
[root@node1 ~]# systemctl restart keepalived.service
五. 测试高可用,共享存储是否正常:
1. 查看VIP的位置是否在node1上:
[root@node1 ~]# ip a | grep 192.168.188.100
inet 192.168.188.100/24 scope global secondary ens33:1
2. 查看node1上的nfs是否已启动,node2上的nfs是否已关闭:
[root@node1 ~]# systemctl is-active nfs
active
[root@node2 ~]# systemctl is-active nfs
inactive
3. 查看node1:/data/下是否有数据,node2:/data/下是否没有数据:
[root@node1 ~]# ls /data
centos-release centos-release-upstream cron.deny crontab crypttab
csh.cshrc csh.login
[root@node2 ~]# ls /data/
[root@node2 ~]#
4. node1停止nfs服务,node2开启nfs服务;并查看VIP是否飘移:
[root@node1 ~]# systemctl stop nfs
[root@node2 ~]# systemctl start nfs
[root@node1 ~]# ip a | grep 192.168.188.100
[root@node1 ~]#
[root@node2 ~]# ip a | grep 192.168.188.100
inet 192.168.188.100/24 scope global secondary ens33:1
5. 查看node2:/data/下是否有数据:
[root@node2 ~]# ls /data/
centos-release centos-release-upstream cron.deny crontab crypttab
csh.cshrc csh.login