Experiment environment:
Server1   172.25.254.1   master/minion   keepalived/haproxy
Server2   172.25.254.2   minion          httpd
Server3   172.25.254.3   minion          nginx
Server4   172.25.254.4   minion          keepalived/haproxy
Set up a new host, server4, and configure it as a salt-minion:
[root@server4 ~]# yum install salt-minion -y   ## requires a fully configured yum repository
[root@server4 ~]# cd /etc/salt/
[root@server4 salt]# ls
cloud cloud.maps.d master minion.d proxy.d
cloud.conf.d cloud.profiles.d master.d pki roster
cloud.deploy.d cloud.providers.d minion proxy
[root@server4 salt]# vim minion   ## point the minion at the master's IP
master: 172.25.254.1
[root@server4 salt]# /etc/init.d/salt-minion start   ## start the service
Starting salt-minion daemon:                               [  OK  ]
Exchange public keys: add server4 on the server1 master.
[root@server1 files]# salt-key -A   ## accept all pending keys
The following keys are going to be accepted:
Unaccepted Keys:
server4
Proceed? [n/Y] y
Key for minion server4 accepted.
[root@server1 files]# salt-key -L   ## confirm the key was accepted
Accepted Keys:
server1
server2
server3
server4
Denied Keys:
Unaccepted Keys:
Rejected Keys:
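With the keys in place, a quick connectivity check from the master is worth running before pushing any states (a minimal sketch, not part of the original transcript; output omitted):

# confirm the master can reach the newly added minion
salt server4 test.ping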
Configure the high-availability component, keepalived:
[root@server1 ~]# cd /srv/salt/
[root@server1 salt]# ls
_grains haproxy httpd nginx pkgs top.sls users
[root@server1 salt]# mkdir keepalived
[root@server1 salt]# cd keepalived/
[root@server1 keepalived]# ls
[root@server1 keepalived]# vim install.sls
[root@server1 keepalived]# cat install.sls
kp-install:
  file.managed:
    - name: /mnt/keepalived-2.0.6.tar.gz
    - source: salt://keepalived/files/keepalived-2.0.6.tar.gz
[root@server1 keepalived]# mkdir files
[root@server1 keepalived]# cd files/
[root@server1 files]# ls
[root@server1 files]# pwd
/srv/salt/keepalived/files
[root@server1 files]# ls
keepalived-2.0.6.tar.gz
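Before the real push, the state can be dry-run: with test=True Salt only reports what would change, without touching the minion (an optional sanity check, not part of the original run):

salt server4 state.sls keepalived.install test=True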
Push keepalived from server1:
[root@server1 files]# salt server4 state.sls keepalived.install
On success, the pushed tarball appears under /mnt/ on server4:
[root@server4 salt]# cd /mnt/
[root@server4 mnt]# ls
keepalived-2.0.6.tar.gz
On server1, flesh out the keepalived installation state and push it again:
[root@server1 keepalived]# cd ..
[root@server1 salt]# ls
_grains haproxy httpd keepalived nginx pkgs top.sls users
[root@server1 salt]# cd pkgs/
[root@server1 pkgs]# ls
make.sls
[root@server1 pkgs]# cat make.sls   ## dependency packages needed to compile from source
make-gcc:
  pkg.installed:
    - pkgs:
      - pcre-devel
      - openssl-devel
      - gcc
[root@server1 pkgs]# cd ..
[root@server1 salt]# cd keepalived/
[root@server1 keepalived]# ls
files install.sls
[root@server1 keepalived]# vim install.sls
[root@server1 keepalived]# cat install.sls
include:
  - pkgs.make                  # pull in the build-dependency state
kp-install:
  file.managed:
    - name: /mnt/keepalived-2.0.6.tar.gz
    - source: salt://keepalived/files/keepalived-2.0.6.tar.gz
  cmd.run:                     # compile from source, discarding build output
    - name: cd /mnt && tar zxf keepalived-2.0.6.tar.gz && cd keepalived-2.0.6 && ./configure --prefix=/usr/local/keepalived --with-init=SYSV &> /dev/null && make &> /dev/null && make install &> /dev/null
    - creates: /usr/local/keepalived
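The creates argument is what makes the cmd.run state idempotent: once /usr/local/keepalived exists, the compile is skipped on every later run. The result can also be confirmed from the master (a verification sketch, not from the original transcript):

# list the install prefix remotely instead of logging in to server4
salt server4 cmd.run 'ls /usr/local/keepalived'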
[root@server1 keepalived]# salt server4 state.sls keepalived.install   ## push the state to install keepalived on server4
While the state runs, the build processes are visible on server4:
[root@server4 mnt]# ps ax
Afterwards, keepalived is present in its install prefix on server4:
[root@server4 mnt]# ls
keepalived-2.0.6 keepalived-2.0.6.tar.gz
[root@server4 mnt]# ll /usr/local/keepalived/
total 16
drwxr-xr-x 2 root root 4096 Aug 18 10:42 bin
drwxr-xr-x 5 root root 4096 Aug 18 10:42 etc
drwxr-xr-x 2 root root 4096 Aug 18 10:42 sbin
drwxr-xr-x 5 root root 4096 Aug 18 10:42 share
Configure keepalived for high availability
Copy server4's keepalived configuration file (/usr/local/keepalived/etc/keepalived/keepalived.conf) and init script (/usr/local/keepalived/etc/rc.d/init.d/keepalived) to /srv/salt/keepalived/files on server1:
[root@server4 mnt]# cd /usr/local/keepalived/etc/rc.d/init.d/
[root@server4 init.d]# scp keepalived server1:/srv/salt/keepalived/files   ## the keepalived init script
[root@server4 ~]# cd /usr/local/keepalived/etc/keepalived/
[root@server4 keepalived]# ls
keepalived.conf samples
[root@server4 keepalived]# scp keepalived.conf server1:/srv/salt/keepalived/files   ## the main configuration file
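As an alternative to scp, and assuming file_recv: True is enabled in the master configuration, the minion can push files back with cp.push; they land in the master's cache rather than /srv/salt, so they still need to be moved into place:

salt server4 cp.push /usr/local/keepalived/etc/keepalived/keepalived.conf
# the file arrives under /var/cache/salt/master/minions/server4/files/ on the master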
On server1, extend the install state to create the required symlinks:
[root@server1 ~ ]# cd /srv/salt/keepalived
[root@server1 keepalived]# ls
files install.sls
[root@server1 keepalived]# vim install.sls
[root@server1 keepalived]# cat install.sls
include:
  - pkgs.make
kp-install:
  file.managed:
    - name: /mnt/keepalived-2.0.6.tar.gz
    - source: salt://keepalived/files/keepalived-2.0.6.tar.gz
  cmd.run:
    - name: cd /mnt && tar zxf keepalived-2.0.6.tar.gz && cd keepalived-2.0.6 && ./configure --prefix=/usr/local/keepalived --with-init=SYSV &> /dev/null && make &> /dev/null && make install &> /dev/null
    - creates: /usr/local/keepalived
/etc/keepalived:
  file.directory:
    - mode: 755                # directory permissions
/etc/sysconfig/keepalived:     # symlink to the sysconfig file
  file.symlink:
    - target: /usr/local/keepalived/etc/sysconfig/keepalived
/sbin/keepalived:              # symlink to the binary
  file.symlink:
    - target: /usr/local/keepalived/sbin/keepalived
[root@server1 keepalived]# salt server4 state.sls keepalived.install   ## push the updated state
On server4, the new symlinks confirm the push succeeded.
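The links can also be checked remotely from the master (a quick verification sketch, not from the original transcript):

salt server4 cmd.run 'ls -l /sbin/keepalived /etc/sysconfig/keepalived'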
On server1, write a state that installs the init script and starts the keepalived service:
[root@server1 keepalived]# pwd
/srv/salt/keepalived
[root@server1 keepalived]# vim service.sls
[root@server1 keepalived]# cat service.sls
include:
  - keepalived.install         # pull in the installation state
/etc/keepalived/keepalived.conf:
  file.managed:
    - source: salt://keepalived/files/keepalived.conf
kp-service:
  file.managed:
    - name: /etc/init.d/keepalived
    - source: salt://keepalived/files/keepalived
    - mode: 755
  service.running:
    - name: keepalived
    - reload: True
    - watch:
      - file: /etc/keepalived/keepalived.conf
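The watch requisite is what ties the pieces together: whenever the managed keepalived.conf changes, the service state reloads keepalived (reload: True) instead of restarting it. The service can also be queried remotely (a minimal check, assuming the init script is already in place):

salt server4 service.status keepalived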
[root@server1 keepalived]# cd /srv/pillar/   ## switch to the pillar tree
[root@server1 pillar]# ls
top.sls web
[root@server1 pillar]# cd web/
[root@server1 web]# ls
install.sls
[root@server1 web]# vim install.sls
[root@server1 web]# cat install.sls
{% if grains['fqdn'] == 'server2' %}    {# if the hostname is server2 #}
webserver: httpd
bind: 172.25.254.2
port: 80
{% elif grains['fqdn'] == 'server3' %}
webserver: nginx
{% endif %}
[root@server1 web]# cd ..
[root@server1 pillar]# ls
top.sls web
[root@server1 pillar]# mkdir keepalived
[root@server1 pillar]# cd keepalived/
[root@server1 keepalived]# ls
[root@server1 keepalived]# cp ../web/install.sls .
[root@server1 keepalived]# vim install.sls
[root@server1 keepalived]# cat install.sls
{% if grains['fqdn'] == 'server1' %}
state: MASTER        # MASTER or BACKUP, to exercise failover
vrid: 38
priority: 100        # VRRP priority
{% elif grains['fqdn'] == 'server4' %}
state: BACKUP
vrid: 38
priority: 50
{% endif %}
[root@server1 keepalived]# ls
install.sls
[root@server1 keepalived]# cd ..
[root@server1 pillar]# ls
keepalived top.sls web
[root@server1 pillar]# vim top.sls
[root@server1 pillar]# cat top.sls
base:
  '*':
    - web.install
    - keepalived.install
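After editing pillar data it is worth refreshing it on the minions and confirming each node sees its own values (standard Salt commands; output varies by node):

salt '*' saltutil.refresh_pillar
salt server4 pillar.item state vrid priority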
[root@server1 pillar]# cd ..
[root@server1 srv]# cd salt/
[root@server1 salt]# ls
_grains haproxy apache keepalived nginx pkgs top.sls users
[root@server1 salt]# cd keepalived/
[root@server1 keepalived]# ls
files install.sls service.sls
Configure one-shot installation of keepalived and service start-up, with the state and priority supplied from pillar:
[root@server1 keepalived]# vim service.sls
[root@server1 keepalived]# cat service.sls
include:
  - keepalived.install
/etc/keepalived/keepalived.conf:
  file.managed:
    - source: salt://keepalived/files/keepalived.conf
    - template: jinja
    - context:
        STATE: {{ pillar['state'] }}
        VRID: {{ pillar['vrid'] }}
        PRIORITY: {{ pillar['priority'] }}
kp-service:
  file.managed:
    - name: /etc/init.d/keepalived
    - source: salt://keepalived/files/keepalived
    - mode: 755
  service.running:
    - name: keepalived
    - reload: True
    - watch:
      - file: /etc/keepalived/keepalived.conf
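The context block feeds the pillar values into the Jinja template, where they surface as {{ STATE }}, {{ VRID }} and {{ PRIORITY }} in keepalived.conf (shown in full further below). To inspect the rendered state data before pushing, state.show_sls is a handy debugging aid (not part of the original run):

salt server4 state.show_sls keepalived.service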
[root@server1 keepalived]# vim files/keepalived
[root@server1 keepalived]# ls
files install.sls service.sls
[root@server1 keepalived]# cd files/
[root@server1 files]# ls
keepalived keepalived-2.0.6.tar.gz keepalived.conf
[root@server1 files]# vim keepalived.conf   ## edit the main config and add the virtual IP
[root@server1 keepalived]# salt server4 state.sls keepalived.service   ## push the service state
On server4 the config file and init script are now in place, and the keepalived processes are running.
On server4 the virtual IP has been taken over:
[root@server4 keepalived]# ip addr
2: eth0:
inet 172.25.254.4/24 brd 172.25.254.255 scope global eth0
inet 172.25.254.100/32 scope global eth0
On server1, edit top.sls so each node gets its services:
[root@server1 keepalived]# pwd
/srv/salt/keepalived
[root@server1 keepalived]# cd ..
[root@server1 salt]# ls
_grains haproxy apache keepalived nginx pkgs top.sls users
[root@server1 salt]# vim top.sls
[root@server1 salt]# cat top.sls
base:
  'server1':
    - haproxy.install
    - keepalived.service
  'server4':
    - haproxy.install
    - keepalived.service
  'roles:apache':
    - match: grain
    - apache.service
  'roles:nginx':
    - match: grain
    - nginx.service
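Because highstate touches every minion at once, a dry run first is cheap insurance (an optional step, commands only):

salt '*' state.highstate test=True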
[root@server1 salt]# salt '*' state.highstate
On server4 the haproxy and keepalived processes are now running:
[root@server4 keepalived]# ps ax
[root@server4 keepalived]# ip addr   ## the virtual IP has moved back to server1
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:52:c1:00 brd ff:ff:ff:ff:ff:ff
inet 172.25.254.4/24 brd 172.25.254.255 scope global eth0
inet6 fe80::5054:ff:fe52:c100/64 scope link
valid_lft forever preferred_lft forever
Testing in a browser shows the back ends are load-balanced.
Stop the keepalived service on server1:
[root@server1 salt]# /etc/init.d/keepalived stop
Stopping keepalived: [ OK ]
Testing again in a browser, the back ends are still load-balanced.
server4 has taken over the virtual IP and become the new MASTER:
[root@server4 keepalived]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:52:c1:00 brd ff:ff:ff:ff:ff:ff
inet 172.25.254.4/24 brd 172.25.254.255 scope global eth0
inet 172.25.254.100/32 scope global eth0
inet6 fe80::5054:ff:fe52:c100/64 scope link
valid_lft forever preferred_lft forever
When keepalived is started on server1 again, it preempts and reclaims the MASTER role:
[root@server1 salt]# /etc/init.d/keepalived start
Starting keepalived: [ OK ]
[root@server1 salt]# ip addr   ## server1 has taken the virtual IP back
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:58:4d:1f brd ff:ff:ff:ff:ff:ff
inet 172.25.38.1/24 brd 172.25.38.255 scope global eth0
inet 172.25.38.100/32 scope global eth0
inet6 fe80::5054:ff:fe58:4d1f/64 scope link
valid_lft forever preferred_lft forever
Drive failover with a health-check script
[root@server1 files]# cd /opt/
[root@server1 opt]# ls
[root@server1 opt]# vim check_haproxy.sh
[root@server1 opt]# cat check_haproxy.sh   ## if haproxy has failed, try to restart it; if that also fails, stop keepalived
#!/bin/bash
# If haproxy is not running, attempt a restart.
/etc/init.d/haproxy status &> /dev/null || /etc/init.d/haproxy restart &> /dev/null
# If the status check or the restart failed, stop keepalived so the
# virtual IP fails over to the backup node.
if [ $? -ne 0 ]; then
    /etc/init.d/keepalived stop &> /dev/null
fi
[root@server1 opt]# chmod +x check_haproxy.sh   ## make the script executable
[root@server1 opt]# /etc/init.d/haproxy status
haproxy (pid 2877) is running...
[root@server1 opt]# echo $?
0
[root@server1 opt]# cd /etc/keepalived/
[root@server1 keepalived]# ls
keepalived.conf
[root@server1 opt]# scp check_haproxy.sh server4:/opt/   ## copy the script to server4; it must keep its execute bit or keepalived cannot run it
[root@server1 keepalived]# vim /srv/salt/keepalived/files/keepalived.conf
[root@server1 keepalived]# cat /srv/salt/keepalived/files/keepalived.conf
! Configuration File for keepalived

vrrp_script check_haproxy {
    script "/opt/check_haproxy.sh"   # hook in the health-check script here as well
    interval 2
    weight 2
}

global_defs {
   notification_email {
     root@localhost
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   # vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state {{ STATE }}
    interface eth0
    virtual_router_id {{ VRID }}
    priority {{ PRIORITY }}
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        172.25.254.100
    }
    track_script {
        check_haproxy
    }
}
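A note on weight 2: with a positive weight, keepalived raises the node's effective priority while the check script succeeds; this setup mostly bypasses that mechanism, since the script stops keepalived outright when haproxy cannot be restarted. keepalived 2.0.4 and later can also syntax-check the rendered config (an optional check; the binary path is assumed from the build prefix used above):

salt server4 cmd.run '/usr/local/keepalived/sbin/keepalived -t -f /etc/keepalived/keepalived.conf'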
Push and test:
[root@server1 keepalived]# salt server4 state.sls keepalived.service
On server1, remove the execute bit from the haproxy init script; the check script then fails and shuts keepalived down:
[root@server1 ~]# cd /etc/init.d/
[root@server1 init.d]# ls
auditd halt keepalived netconsole rdisc rsyslog saslauthd
blk-availability haproxy killall netfs restorecond salt-master single
crond ip6tables lvm2-lvmetad network rhnsd salt-minion sshd
functions iptables lvm2-monitor postfix rhsmcertd sandbox udev-post
[root@server1 init.d]# chmod -x haproxy
[root@server1 init.d]# /etc/init.d/keepalived status
keepalived is stopped
The MASTER role moves to server4, and the immediate effect is that server4 takes over the virtual IP:
[root@server4 opt]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:52:c1:00 brd ff:ff:ff:ff:ff:ff
inet 172.25.254.4/24 brd 172.25.254.255 scope global eth0
inet 172.25.254.100/32 scope global eth0
inet6 fe80::5054:ff:fe52:c100/64 scope link
valid_lft forever preferred_lft forever
Verify high availability
[root@server1 init.d]# ls
auditd halt keepalived netconsole rdisc rsyslog saslauthd
blk-availability haproxy killall netfs restorecond salt-master single
crond ip6tables lvm2-lvmetad network rhnsd salt-minion sshd
functions iptables lvm2-monitor postfix rhsmcertd sandbox udev-post
[root@server1 init.d]# chmod +x haproxy   ## restore the execute bit
[root@server1 init.d]# ll haproxy
-rwxr-xr-x 1 root root 2298 Jul 10 2013 haproxy
[root@server1 init.d]# /etc/init.d/keepalived status
keepalived is stopped
[root@server1 init.d]# /etc/init.d/keepalived start   ## start the keepalived service
Starting keepalived: [ OK ]
[root@server1 init.d]# ip addr   ## server1 preempts, reclaims MASTER, and takes the virtual IP back
[root@server1 init.d]# salt server1 service.start keepalived   ## the service can also be started through Salt
server1:
True
[root@server1 init.d]# cd
[root@server1 ~]# salt-cp '*' /etc/passwd /tmp   ## copy /etc/passwd into /tmp on every minion
Verify on server2 (taking server2 as the example rather than checking every node):
[root@server2 ~]# cd /tmp/
[root@server2 tmp]# ls
passwd yum.log yum_save_tx-2018-08-17-09-30GSgtBm.yumtx
[root@server1 ~]# salt '*' cmd.run 'rm -f /tmp/passwd'   ## remove the copied passwd file via cmd.run
server4:
server2:
server3:
server1:
[root@server1 ~]# cd /tmp/   ## confirm on server1 that it is gone
[root@server1 tmp]# ls
yum.log
On server2 the passwd file has likewise been removed:
[root@server2 tmp]# ls
yum.log yum_save_tx-2018-08-17-09-30GSgtBm.yumtx
[root@server1 tmp]# salt server3 state.single pkg.installed httpd   ## call a single state function directly to install Apache
On server3, httpd is now installed:
[root@server3 ~]# rpm -q httpd
httpd-2.2.15-29.el6_4.x86_64