One-click deployment of a keepalived + haproxy high-availability load-balancing cluster with SaltStack

单于高逸
2023-12-01

Environment

Hostname    IP            Services
server1     172.25.1.1    salt-master, salt-minion, haproxy, keepalived
server2     172.25.1.2    salt-minion, httpd
server3     172.25.1.3    salt-minion, nginx
server4     172.25.1.4    salt-minion, haproxy, keepalived

This article covers only the automated deployment of keepalived.
For the httpd and nginx deployment, see
https://blog.csdn.net/u010489158/article/details/83757043
For the haproxy deployment, see
https://blog.csdn.net/u010489158/article/details/83831040


Installing and deploying keepalived

Create directories for the Salt state files

[root@server1 salt]# cd /srv/salt
[root@server1 salt]# mkdir keepalived
[root@server1 salt]# cd keepalived
[root@server1 keepalived]# mkdir file   # directory for config files and packages

Edit the installation state

[root@server1 keepalived]# vim make.sls    # edit the installation state
include:
  - haproxy.yum    # reuse the yum repository state from the haproxy directory

/mnt/libnfnetlink-devel-1.0.0-1.el6.x86_64.rpm:    # dependency missing from the image, so push the rpm over manually
  file.managed:
    - source: salt://keepalived/file/libnfnetlink-devel-1.0.0-1.el6.x86_64.rpm
keepalived-install:
  pkg.installed:   # build dependencies
    - pkgs:
      - libnl-devel
      - openssl-devel
      - iptables-devel
      - gcc
  file.managed:    # the keepalived source tarball
    - name: /mnt/keepalived-2.0.6.tar.gz
    - source: salt://keepalived/file/keepalived-2.0.6.tar.gz
  cmd.run:   # shell commands that build and install keepalived
    - name: cd /mnt && yum install -y libnfnetlink-devel-1.0.0-1.el6.x86_64.rpm && tar zxf keepalived-2.0.6.tar.gz && cd keepalived-2.0.6 && ./configure --prefix=/usr/local/keepalived --with-init=SYSV && make && make install
    - creates: /usr/local/keepalived    # skip the build if this directory already exists

/etc/keepalived:    # ensure the config directory exists with mode 755
  file.directory:
    - mode: 755

/etc/sysconfig/keepalived:    # symlink this path to the target inside the install prefix
  file.symlink:
    - target: /usr/local/keepalived/etc/sysconfig/keepalived

/sbin/keepalived:
  file.symlink:
    - target: /usr/local/keepalived/sbin/keepalived
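
Before wiring this into the top file, it is worth dry-running the state on a single minion. A quick check (state.sls with test=True is standard Salt usage):

[root@server1 keepalived]# salt server1 state.sls keepalived.make test=True   # show what would change, without applying
[root@server1 keepalived]# salt server1 state.sls keepalived.make             # apply for real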

The keepalived service state

[root@server1 keepalived]# vim service.sls 
include:
  - keepalived.make    # pull in the installation state, so a single apply does the whole deployment
/etc/keepalived/keepalived.conf:
  file.managed:    # manage the config file from the master copy
    - source: salt://keepalived/file/keepalived.conf
    - template: jinja   # render with the Jinja template engine
    - context:
        STATE: {{ pillar['state'] }}    # variables defined in pillar
        PRIORITY: {{ pillar['priority'] }}
/opt/check_haproxy.sh:
  file.managed:
    - source: salt://keepalived/file/check_haproxy.sh
    - mode: 755
keepalived-service:
  file.managed:
    - name: /etc/init.d/keepalived
    - source: salt://keepalived/file/keepalived
    - mode: 755

  service.running:
    - name: keepalived
    - enable: True    # start on boot
    - reload: True
    - watch: 
      - file: /etc/keepalived/keepalived.conf
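
The watch requisite makes Salt reload keepalived whenever keepalived.conf changes, rather than restarting it. To see how the SLS and its Jinja template render for a particular minion before applying anything, state.show_sls can be used:

[root@server1 keepalived]# salt server1 state.show_sls keepalived.service   # render the SLS (pillar applied) without executing it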

Files required by the keepalived states

[root@server1 file]# cd /srv/salt/keepalived/file
[root@server1 file]# ls
check_haproxy.sh         keepalived.conf
keepalived               libnfnetlink-devel-1.0.0-1.el6.x86_64.rpm
keepalived-2.0.6.tar.gz

The haproxy health-check script used by keepalived

[root@server1 file]# cat check_haproxy.sh  
#!/bin/bash

/etc/init.d/haproxy status &> /dev/null || /etc/init.d/haproxy restart &> /dev/null
# If haproxy is running, do nothing; if it is down, try to restart it.
if [ $? -ne 0 ]; then
    /etc/init.d/keepalived stop &> /dev/null
fi
# If the restart itself fails, haproxy is genuinely broken, so the script stops
# keepalived and the VIP fails over to the standby node.
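
The script can be sanity-checked by hand on a node where haproxy is installed; it should exit 0 while haproxy is healthy:

[root@server1 file]# bash check_haproxy.sh; echo $?
0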

The keepalived configuration file

[root@server1 file]# cat keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
	root@localhost
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   #vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_script check_haproxy {   
        script "/opt/check_haproxy.sh"
        interval 2 
        weight 2
}

vrrp_instance VI_1 {
    state {{ STATE }}    # pillar value
    interface eth0
    virtual_router_id 51
    priority {{ PRIORITY }}   # pillar value
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        172.25.1.100
    }

    track_script {   # must come after the virtual_ipaddress block
        check_haproxy
    }
}
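
With the pillar values defined below, the two Jinja placeholders render on server1 as:

    state MASTER
    priority 100

and on server4 as:

    state BACKUP
    priority 50

Note that weight 2 on the vrrp_script would normally let a passing check raise the effective priority by 2; in this setup, however, failover is forced by check_haproxy.sh stopping keepalived outright when haproxy cannot be revived.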

The keepalived init script

[root@server1 file]# cat keepalived
#!/bin/sh
#
# Startup script for the Keepalived daemon
#
# processname: keepalived
# pidfile: /var/run/keepalived.pid
# config: /etc/keepalived/keepalived.conf
# chkconfig: - 21 79
# description: Start and stop Keepalived

# Source function library
. /etc/rc.d/init.d/functions

# Source configuration file (we set KEEPALIVED_OPTIONS there)
. /etc/sysconfig/keepalived

RETVAL=0

prog="keepalived"

start() {
    echo -n $"Starting $prog: "
    daemon keepalived ${KEEPALIVED_OPTIONS}
    RETVAL=$?
    echo
    [ $RETVAL -eq 0 ] && touch /var/lock/subsys/$prog
}

stop() {
    echo -n $"Stopping $prog: "
    killproc keepalived
    RETVAL=$?
    echo
    [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/$prog
}

reload() {
    echo -n $"Reloading $prog: "
    killproc keepalived -1
    RETVAL=$?
    echo
}

# See how we were called.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    reload)
        reload
        ;;
    restart)
        stop
        start
        ;;
    condrestart)
        if [ -f /var/lock/subsys/$prog ]; then
            stop
            start
        fi
        ;;
    status)
        status keepalived
        RETVAL=$?
        ;;
    *)
        echo "Usage: $0 {start|stop|reload|restart|condrestart|status}"
        RETVAL=1
esac

exit $RETVAL
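
Because the service state sets enable: True, Salt registers this script with chkconfig, and the "# chkconfig: - 21 79" header supplies the start/stop priorities. This can be verified by hand (runlevels 2 through 5 should show "on"):

[root@server1 ~]# chkconfig --list keepalived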

Pillar definitions used by keepalived

[root@server1 web]# vim /srv/pillar/web/install.sls
{% if grains['fqdn'] == 'server1' %}
state: MASTER
priority: 100
{% elif grains['fqdn'] == 'server4' %}
state: BACKUP
priority: 50
{% endif %}

[root@server1 pillar]# cd /srv/pillar
[root@server1 pillar]# vim top.sls 
base:
  '*':
    - web.install
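
After editing pillar data, refresh it on the minions and confirm the values landed where expected (saltutil.refresh_pillar and pillar.item are standard Salt functions):

[root@server1 pillar]# salt '*' saltutil.refresh_pillar
[root@server1 pillar]# salt 'server*' pillar.item state priority   # server1/server4 should show MASTER/100 and BACKUP/50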

The global top file

[root@server1 salt]# vim /srv/salt/top.sls 
base:
  'server1':
    - haproxy.service
    - keepalived.service
  'server4':
    - haproxy.service
    - keepalived.service
  'server2':
    - apache.install
  'server3':
    - nginx.service
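
Before pushing everything, it helps to confirm that all four minions respond and to dry-run the highstate:

[root@server1 salt]# salt '*' test.ping                  # every minion should return True
[root@server1 salt]# salt '*' state.highstate test=True  # dry run: report pending changes only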

[root@server1 salt]# salt '*' state.highstate

Testing

Accessing the VIP shows nginx and apache served in round-robin:

[root@foundation1 ~]# curl 172.25.1.100
this is nginx!!!!
[root@foundation1 ~]# curl 172.25.1.100
this is apache
[root@foundation1 ~]# curl 172.25.1.100
this is nginx!!!!
[root@foundation1 ~]# curl 172.25.1.100
this is apache
[root@foundation1 ~]# curl 172.25.1.100
this is nginx!!!!

The VIP is attached on the MASTER node (server1):

[root@server1 salt]# ip add show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:37:6b:32 brd ff:ff:ff:ff:ff:ff
    inet 172.25.1.1/24 brd 172.25.1.255 scope global eth0
    inet 172.25.1.100/32 scope global eth0
    inet6 fe80::5054:ff:fe37:6b32/64 scope link 
       valid_lft forever preferred_lft forever
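
The VRRP election can also be confirmed in the system log (the path assumes the RHEL6-style syslog used in this environment):

[root@server1 salt]# grep -i vrrp /var/log/messages | tail   # expect "Entering MASTER STATE" on server1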

Verifying keepalived's health check of haproxy

[root@server1 ~]# /etc/init.d/haproxy stop
Shutting down haproxy:                                     [  OK  ]
[root@server1 ~]# /etc/init.d/haproxy status   # after stopping haproxy, the check script brings it straight back up
haproxy (pid  13370) is running...

Making haproxy unable to start automatically

[root@server1 init.d]# mv haproxy /mnt
[root@server1 mnt]# ./haproxy stop
Shutting down haproxy:                                     [  OK  ]
[root@server1 mnt]# ./haproxy status
haproxy is stopped
[root@server1 mnt]# ip add   # the VIP is gone from server1
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:37:6b:32 brd ff:ff:ff:ff:ff:ff
    inet 172.25.1.1/24 brd 172.25.1.255 scope global eth0
    inet6 fe80::5054:ff:fe37:6b32/64 scope link 
       valid_lft forever preferred_lft forever


## the VIP fails over to server4
[root@server4 rpmbuild]# ip add show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:59:6c:d8 brd ff:ff:ff:ff:ff:ff
    inet 172.25.1.4/24 brd 172.25.1.255 scope global eth0
    inet 172.25.1.100/32 scope global eth0
    inet6 fe80::5054:ff:fe59:6cd8/64 scope link 
       valid_lft forever preferred_lft forever
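
To recover, move the init script back and restart the services on server1. Since server1 has the higher priority and VRRP preemption is on by default, it should reclaim the VIP; a sketch of the recovery under those assumptions:

[root@server1 ~]# mv /mnt/haproxy /etc/init.d/
[root@server1 ~]# /etc/init.d/haproxy start
[root@server1 ~]# /etc/init.d/keepalived start
[root@server1 ~]# ip add show eth0   # 172.25.1.100 should move back to server1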