实验环境拓扑图:
备注:内网段使用192.168.91.0/24 网段模拟。外网使用192.168.23.0/24网段模拟
1、两节点上关闭防火墙和selinux。
[root@node1 keepalived]# systemctl stop firewalld #关闭防火墙
[root@node1 keepalived]# systemctl disable firewalld #关闭防火墙自动启动
#如果不想关闭防火墙就需要开放组播地址224.0.0.18.两个节点之间的是通过这个组播地址发送VRRP相关信息,主要是节点心跳、优先级等信息
[root@node1 keepalived]# systemctl list-unit-files | grep firewalld #验证是否关闭自动启动。disabled为关闭
firewalld.service disabled
#关闭selinux。这个不关闭有可能无法启动keepalived的服务,目前还没有找到解决方案
[root@node1 keepalived]# setenforce 0 # 临时关闭
[root@node1 keepalived]# vim /etc/selinux/config #永久关闭。disabled为关闭。需要重启系统才能生效
SELINUX=disabled
2、两节点上的时间必须同步。
centos 7 上使用chrony 这个软件实现时间同步,和ntp 类似,据说功能比ntp强大。安装上这个软件即可实现同步,不需要进行额外的配置。
[root@node1 keepalived]# yum -y install chrony #两个节点上都安装上如果不能上互联网需要配置/etc/chrony.conf,将NTP服务器域名或IP地址加入此文件中
[root@node1 keepalived]# vim /etc/chrony.conf
不能上互联网需要将这四行注释,其实不注释也是可以的,只要将内网NTP服务器加到第一条即可
server ntp.centos7.cn #NTP 服务器的域名:ntp.centos7.cn 可以解析此域名
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
[root@node1 keepalived]# date;ssh node2 'date'
Thu Mar 3 17:55:31 CST 2016
Thu Mar 3 17:55:32 CST 2016
3、两节点之间ssh通过使用密钥访问
#生成公/私钥对。
[root@node1 keepalived]# ssh-keygen #敲两下回车。公/私钥存储的目录/root/.ssh/
#将node1 生成的公钥信息传递到node2 /root/.ssh/目录
[root@node1 .ssh]# scp id_rsa.pub root@node2:/root/.ssh/id_rsa.pub
#在node2 上将node1 的公钥导入到authorized_keys文件里。原/root/.ssh/目录下没有这个文件就会新建这个文件,有的话就会继续向里面附加内容
[root@node2 .ssh]# cat id_rsa.pub >>authorized_keys
在node2 节点上删除node1 的公钥
[root@node1 .ssh]# rm -f id_rsa.pub
node2 也需要生成公/私钥对,将公钥复制到node1 /root/.ssh/目录下,并将其导入authorized_keys,然后删除node2 的公钥文件
4、两个节点上配置host文件,让两个节点能通过主机名进行通讯.编辑/etc/hosts文件。加如下内容:
192.168.91.129 centos7.cn node1
192.168.91.130 centos7.cn node2
5、两个节点上开启路由转发功能
vim /etc/sysctl.conf
net.ipv4.ip_forward=1
[root@node1 ~]# sysctl -p
net.ipv4.ip_forward = 1
确认路由转发功能是否开启。1为开启
[root@node1 keepalived]# cat /proc/sys/net/ipv4/ip_forward
1
6、在两个节点上安装keepalived 和ipvsadm(这个不是必须的,安装了便于查看LVS的相关信息)
yum -y install keepalived ipvsadm;ssh node2 'yum -y install keepalived ipvsadm'
node1 节点上keepalived.conf 配置文件
# Global keepalived settings for node1 (the preferred LVS director).
global_defs
{
notification_email
{
# Recipient of failover / health-check notification mail.
root@localhost
}
# Sender address used in notification mail (upstream example default).
notification_email_from Alexandre.Cassen@firewall.loc
# Local MTA used to deliver the notifications.
smtp_server 127.0.0.1
smtp_connect_timeout 30
# Identifier of this director in logs/notifications.
router_id LVS_MASTER
}
# VRRP instance for internal VIP 192.168.91.15.
# node1 is the preferred owner: MASTER with priority 100 (peer runs 99).
vrrp_instance VI_1
{
state MASTER
# VRRP advertisements go out the internal-network NIC.
interface eno16777736
# Must match the peer node's virtual_router_id for this same VIP.
virtual_router_id 51
priority 100
# Advertisement interval in seconds.
advert_int 1
authentication
{
auth_type PASS
# Shared secret; must be identical on both nodes for this instance.
auth_pass abbac1e595fe
}
# VIP is added with label eno16777736:0 while this node holds MASTER.
virtual_ipaddress
{
192.168.91.15/32 dev eno16777736 label eno16777736:0
}
# Host route for the VIP, installed/removed together with the address.
virtual_routes
{
192.168.91.15/32 dev eno16777736:0
}
}
# VRRP instance for the second internal VIP 192.168.91.16.
# Mirror of VI_1 with roles swapped: node2 owns this VIP, node1 is
# BACKUP (priority 99), giving active/active load sharing.
vrrp_instance VI_2
{
state BACKUP
interface eno16777736
# Distinct router id per VIP; must match the peer's VI_2.
virtual_router_id 52
priority 99
advert_int 1
authentication
{
auth_type PASS
auth_pass 1e67cca200cf
}
virtual_ipaddress
{
192.168.91.16/32 dev eno16777736 label eno16777736:1
}
virtual_routes
{
192.168.91.16/32 dev eno16777736:1
}
}
# VRRP instance for external-side VIP 192.168.23.15 (default gateway of
# zone-1 real servers). Paired with VI_1: node1 is MASTER for both so
# the internal VIP and its matching gateway VIP fail over together.
vrrp_instance VI_3
{
state MASTER
# External-network NIC.
interface eno33554984
virtual_router_id 53
priority 100
advert_int 1
authentication
{
auth_type PASS
auth_pass e027a03bcd81
}
virtual_ipaddress
{
192.168.23.15/32 dev eno33554984 label eno33554984:0
}
virtual_routes
{
192.168.23.15/32 dev eno33554984:0
}
}
# VRRP instance for external-side VIP 192.168.23.14 (default gateway of
# zone-2 real servers). Paired with VI_2: node2 owns it, node1 backs up.
vrrp_instance VI_4
{
state BACKUP
interface eno33554984
virtual_router_id 54
priority 99
advert_int 1
authentication
{
auth_type PASS
auth_pass f03c1c91c7fc
}
virtual_ipaddress
{
192.168.23.14/32 dev eno33554984 label eno33554984:1
}
virtual_routes
{
192.168.23.14/32 dev eno33554984:1
}
}
# LVS virtual service on internal VIP 192.168.91.15:80.
# Weighted round-robin, NAT forwarding to the zone-1 real servers
# (whose default gateway is the 192.168.23.15 VIP held by VI_3).
virtual_server 192.168.91.15 80
{
# Health-check polling interval, seconds.
delay_loop 6
lb_algo wrr
lb_kind NAT
protocol TCP
real_server 192.168.23.16 80
{
# Higher weight -> receives proportionally more connections.
weight 6
# HTTP health check: GET / must answer 200 or the server is
# removed from the pool until it recovers.
HTTP_GET
{
url
{
path /
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.23.18 80
{
weight 3
HTTP_GET
{
url
{
path /
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
# LVS virtual service on internal VIP 192.168.91.16:80.
# Same scheme as the 192.168.91.15 service but backed by the zone-2
# real servers (default gateway: the 192.168.23.14 VIP held by VI_4).
virtual_server 192.168.91.16 80
{
delay_loop 6
lb_algo wrr
lb_kind NAT
protocol TCP
real_server 192.168.23.19 80
{
weight 6
# HTTP health check: GET / must answer 200.
HTTP_GET
{
url
{
path /
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.23.17 80
{
weight 3
HTTP_GET
{
url
{
path /
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
[root@node1 keepalived]# systemctl start keepalived #启动keepalived服务
[root@node1 keepalived]# systemctl enable keepalived # 开机自动启动keepalived服务
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@node1 keepalived]# systemctl list-unit-files | grep keepalived #验证开机是否自动启动keepalived服务
keepalived.service enabled
node1 ipvs相关信息
[root@node1 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.91.15:80 wrr
-> 192.168.23.16:80 Masq 6 0 0
-> 192.168.23.18:80 Masq 3 0 0
TCP 192.168.91.16:80 wrr
-> 192.168.23.19:80 Masq 6 0 0
-> 192.168.23.17:80 Masq 3 0 0
node1 IP地址信息
[root@node1 keepalived]# ifconfig
eno16777736: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.91.129 netmask 255.255.255.0 broadcast 192.168.91.255
inet6 fe80::20c:29ff:fec1:fe33 prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:c1:fe:33 txqueuelen 1000 (Ethernet)
RX packets 31637 bytes 5942061 (5.6 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 30175 bytes 2627032 (2.5 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
eno16777736:0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.91.15 netmask 255.255.255.255 broadcast 0.0.0.0
ether 00:0c:29:c1:fe:33 txqueuelen 1000 (Ethernet)
eno33554984: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.23.11 netmask 255.255.255.0 broadcast 192.168.23.255
inet6 fe80::20c:29ff:fec1:fe3d prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:c1:fe:3d txqueuelen 1000 (Ethernet)
RX packets 63347 bytes 8511811 (8.1 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 81785 bytes 6504879 (6.2 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
eno33554984:0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.23.15 netmask 255.255.255.255 broadcast 0.0.0.0
ether 00:0c:29:c1:fe:3d txqueuelen 1000 (Ethernet)
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 0 (Local Loopback)
RX packets 146 bytes 10759 (10.5 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 146 bytes 10759 (10.5 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
node2 节点上keepalived.conf 配置文件
# Global keepalived settings for node2 (the standby-preferred director).
global_defs
{
notification_email
{
# Recipient of failover / health-check notification mail.
root@localhost
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 127.0.0.1
smtp_connect_timeout 30
# Identifier of this director; differs from node1's LVS_MASTER.
router_id LVS_BACKUP
}
# Peer of node1's VI_1 (internal VIP 192.168.91.15): same
# virtual_router_id and auth_pass, but BACKUP with lower priority, so
# node2 takes the VIP only when node1 fails.
vrrp_instance VI_1 {
state BACKUP
interface eno16777736
virtual_router_id 51
priority 99
advert_int 1
authentication
{
auth_type PASS
auth_pass abbac1e595fe
}
virtual_ipaddress
{
192.168.91.15/32 dev eno16777736 label eno16777736:0
}
virtual_routes
{
192.168.91.15/32 dev eno16777736:0
}
}
# Peer of node1's VI_2: node2 is the preferred owner of internal VIP
# 192.168.91.16 (MASTER, priority 100 vs node1's 99).
vrrp_instance VI_2
{
state MASTER
interface eno16777736
virtual_router_id 52
priority 100
advert_int 1
authentication
{
auth_type PASS
auth_pass 1e67cca200cf
}
virtual_ipaddress
{
192.168.91.16/32 dev eno16777736 label eno16777736:1
}
virtual_routes
{
192.168.91.16/32 dev eno16777736:1
}
}
# Peer of node1's VI_3 (external gateway VIP 192.168.23.15 for zone-1
# real servers): BACKUP here, fails over together with VI_1.
vrrp_instance VI_3 {
state BACKUP
interface eno33554984
virtual_router_id 53
priority 99
advert_int 1
authentication
{
auth_type PASS
auth_pass e027a03bcd81
}
virtual_ipaddress
{
192.168.23.15/32 dev eno33554984 label eno33554984:0
}
virtual_routes
{
192.168.23.15/32 dev eno33554984:0
}
}
# Peer of node1's VI_4: node2 owns external gateway VIP 192.168.23.14
# (default gateway of the zone-2 real servers), paired with VI_2.
vrrp_instance VI_4
{
state MASTER
interface eno33554984
virtual_router_id 54
priority 100
advert_int 1
authentication
{
auth_type PASS
auth_pass f03c1c91c7fc
}
virtual_ipaddress
{
192.168.23.14/32 dev eno33554984 label eno33554984:1
}
virtual_routes
{
192.168.23.14/32 dev eno33554984:1
}
}
# LVS virtual service on VIP 192.168.91.15:80 — identical to node1's
# definition so the service is unchanged after a failover.
virtual_server 192.168.91.15 80
{
# Health-check polling interval, seconds.
delay_loop 6
lb_algo wrr
lb_kind NAT
protocol TCP
real_server 192.168.23.16 80
{
weight 6
# HTTP health check: GET / must answer 200.
HTTP_GET
{
url
{
path /
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.23.18 80
{
weight 3
HTTP_GET
{
url
{
path /
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
# LVS virtual service on VIP 192.168.91.16:80 — identical to node1's
# definition (zone-2 real servers, gateway VIP 192.168.23.14).
virtual_server 192.168.91.16 80
{
delay_loop 6
lb_algo wrr
lb_kind NAT
protocol TCP
real_server 192.168.23.19 80
{
weight 6
# HTTP health check: GET / must answer 200.
HTTP_GET
{
url
{
path /
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
real_server 192.168.23.17 80
{
weight 3
HTTP_GET
{
url
{
path /
status_code 200
}
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
}
}
}
[root@node2 keepalived]# ifconfig
eno16777736: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.91.130 netmask 255.255.255.0 broadcast 192.168.91.255
inet6 fe80::20c:29ff:fe2b:9929 prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:2b:99:29 txqueuelen 1000 (Ethernet)
RX packets 34791 bytes 10658056 (10.1 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 28618 bytes 2324860 (2.2 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
eno16777736:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.91.16 netmask 255.255.255.255 broadcast 0.0.0.0
ether 00:0c:29:2b:99:29 txqueuelen 1000 (Ethernet)
eno33554984: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.23.12 netmask 255.255.255.0 broadcast 192.168.23.255
inet6 fe80::20c:29ff:fe2b:9933 prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:2b:99:33 txqueuelen 1000 (Ethernet)
RX packets 66429 bytes 8629069 (8.2 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 86941 bytes 6784854 (6.4 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
eno33554984:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.23.14 netmask 255.255.255.255 broadcast 0.0.0.0
ether 00:0c:29:2b:99:33 txqueuelen 1000 (Ethernet)
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 0 (Local Loopback)
RX packets 150 bytes 10963 (10.7 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 150 bytes 10963 (10.7 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@node2 keepalived]# cat /proc/sys/net/ipv4/ip_forward
1
[root@node2 keepalived]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.91.15:80 wrr
-> 192.168.23.16:80 Masq 6 0 0
-> 192.168.23.18:80 Masq 3 0 0
TCP 192.168.91.16:80 wrr
-> 192.168.23.19:80 Masq 6 0 0
-> 192.168.23.17:80 Masq 3 0 0
Real Server 配置好IP地址。网段:192.168.23.0/24 尾数从16开始偶数的默认网关:192.168.23.15 ,尾数为奇数的默认网关:192.168.23.14
测试:客户端分别访问http://192.168.91.15 和 http://192.168.91.16 得到的结果是15为区域1的服务器 16为区域2的服务器
有没有更好的办法让内部Real Server 不分区,默认网关一样。
问题:客户端只能通过一个外网VIP1访问后端服务器的资源,另一个外网VIP2无法访问,原因在于后端服务器的默认网关只能配一个,默认网关IP地址只能
配置在一台DR上的子接口,即内网VIP。所以采用了折衷的办法:将后端服务器划分成两个区域,网段一样,子网掩码一样,不一样的只是IP地址和
默认网关不一样。使用两个外网VIP和两个内网VIP。不知道是否有更好的办法。
原创文章,作者:jslijb,如若转载,请注明出处:http://www.178linux.com/12698
评论列表(2条)
写标签的意识非常赞,代码没有格式化显得非常乱,没有读下去的欲望
@stanley:多谢建议!可能检查不仔细,提交之前是代码是有格式化的!