一、实验图
二、环境准备
1)确保时间同步
[root@SQL1 ~]# crontab -e */5 * * * * /usr/sbin/ntpdate 172.16.2.15
[root@SQL2 ~]# crontab -e */5 * * * * /usr/sbin/ntpdate 172.16.2.15
2)确保可以基于主机名通信
[root@SQL1 ~]# vim /etc/hosts 172.16.2.12 SQL1.linux.com SQL1 172.16.2.14 SQL2.linux.com SQL2
[root@SQL2 ~]# vim /etc/hosts 172.16.2.12 SQL1.linux.com SQL1 172.16.2.14 SQL2.linux.com SQL2
3)确保可以基于ssh密钥通信
[root@SQL1 ~]# ssh-keygen -P '' [root@SQL1 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@172.16.2.14 root@172.16.2.14's password:
[root@SQL2 ~]# ssh-keygen -P '' [root@SQL2 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@172.16.2.12 root@172.16.2.12's password:
4)测试
[root@SQL1 ~]# date; ssh SQL2 'date' Tue Jun 30 11:12:37 CST 2015 Tue Jun 30 11:12:37 CST 2015
[root@SQL2 ~]# date; ssh SQL1 'date' Tue Jun 30 11:14:01 CST 2015 Tue Jun 30 11:14:01 CST 2015
三、安装配置corosync;pacemaker
安装corosync
[root@SQL1 ~]# yum -y install corosync
[root@SQL2 ~]# yum -y install corosync
安装pacemaker
[root@SQL1 ~]# yum -y install pacemaker
[root@SQL2 ~]# yum -y install pacemaker
配置corosync
[root@SQL1 ~]# cd /etc/corosync \\切换到corosync的配置文件目录 [root@SQL1 corosync]# cp corosync.conf.example corosync.conf \\提供corosync的配置文件 [root@SQL1 corosync]# vim corosync.conf compatibility: whitetank \\兼容旧版本的corosync totem { \\用来设置监听集群之间心跳信息传递机制 version: 2 \\corosync的版本,默认即可 secauth: on \\启用安全密钥机制 threads: 0 \\启动多少个线程传递心跳信息 interface { ringnumber: 0 \\循环次数 bindnetaddr: 172.16.2.0 \\绑定的网络地址,是一个网络地址不是具体的ip地址 mcastaddr: 239.225.100.10 \\定义组播地址 mcastport: 5405 \\定义监听端口号 ttl: 1 \\ 信息传递只允许在当前网络中传输 } } logging { fileline: off \\默认即可 to_stderr: no \\是否将错误输出到终端,no为禁止输出到终端,保持默认即可 to_logfile: yes \\记录为日志文件 logfile: /var/log/cluster/corosync.log \\记录日志文件路径 #to_syslog: yes\\是否启用rsyslog日志,这里已经启用corosync自己记录日志功能,所以这里注释掉,不使用 debug: off \\debug日志;只有在调试的时候开启 timestamp: on \\记录日志的时间戳,会有IO操作,根据实际情况开启 logger_subsys { subsys: AMF \\默认即可 debug: off \\默认即可 } } service { \\定义pacemaker以corosync的插件方式运行 ver: 0 \\版本;定义pacemaker的版本 name: pacemaker \\定义名称 } aisexec { \\定义运行的用户和用户组 user: root group: root }
生成密钥
[root@SQL1 corosync]# corosync-keygen \\这里需要输入随机数,用于密钥生成
复制相同的配置文件与密钥给SQL2服务器
[root@SQL1 corosync]# scp -p authkey corosync.conf SQL2:/etc/corosync
启动corosync
[root@SQL1 corosync]# service corosync start
[root@SQL2 ~]# service corosync start
查看日志,确保corosync,pacemaker正常启动
[root@SQL1 ~]# grep -e "Corosync Cluster Engine" -e "configuration file" /var/log/cluster/corosync.log Jun 30 11:15:20 corosync [MAIN ] Corosync Cluster Engine ('1.4.7'): started and ready to provide service. Jun 30 11:18:32 corosync [MAIN ] Corosync Cluster Engine exiting with status 0 at main.c:2055. Jun 30 11:18:50 corosync [MAIN ] Corosync Cluster Engine ('1.4.7'): started and ready to provide service.
[root@SQL1 ~]# grep pcmk_startup /var/log/cluster/corosync.log Jun 30 11:15:20 corosync [pcmk ] info: pcmk_startup: CRM: Initialized Jun 30 11:15:20 corosync [pcmk ] Logging: Initialized pcmk_startup Jun 30 11:15:20 corosync [pcmk ] info: pcmk_startup: Maximum core file size is: 18446744073709551615 Jun 30 11:15:20 corosync [pcmk ] info: pcmk_startup: Service: 9 Jun 30 11:15:20 corosync [pcmk ] info: pcmk_startup: Local hostname: SQL1.linux.com
[root@SQL1 ~]# grep TOTEM /var/log/cluster/corosync.log Jun 30 11:15:20 corosync [TOTEM ] Initializing transport (UDP/IP Multicast). Jun 30 11:15:20 corosync [TOTEM ] Initializing transmit/receive security: libtomcrypt SOBER128/SHA1HMAC (mode 0). Jun 30 11:15:20 corosync [TOTEM ] The network interface [172.16.2.12] is now up. Jun 30 11:15:20 corosync [TOTEM ] A processor joined or left the membership and a new membership was formed. Jun 30 11:15:30 corosync [TOTEM ] A processor joined or left the membership and a new membership was formed.
[root@SQL1 ~]# grep ERROR: /var/log/cluster/corosync.log | grep -v unpack_resources Jun 30 11:15:20 corosync [pcmk ] ERROR: process_ais_conf: You have configured a cluster using the Pacemaker plugin for Corosync. The plugin is not supported in this environment and will be removed very soon.
四、配置高可用mysql
配置nfs
[root@nfs ~]# groupadd -g 350 mysql \\由于nfs限制,需要在nfs服务器上创建mysql用户和mysql组 [root@nfs ~]# useradd -u 350 -g 350 mysql [root@nfs ~]# vim /etc/exports /mydata 172.16.2.0/24(rw,no_root_squash) [root@nfs ~]# mkdir -pv /mydata/data [root@nfs ~]# chown -R mysql.mysql /mydata/data [root@nfs ~]# service nfs start 挂载测试: [root@SQL1 ~]# mount -t nfs 172.16.2.13:/mydata /mnt [root@SQL1 ~]# ls /mnt data [root@SQL1 ~]# umount /mnt [root@SQL2 ~]# mount -t nfs 172.16.2.13:/mydata /mnt [root@SQL2 ~]# ls /mnt data [root@SQL2 ~]# umount /mnt
配置mariadb
[root@SQL1 ~]# tar xf mariadb-5.5.36-linux-x86_64.tar.gz -C /usr/local/ [root@SQL1 ~]# cd /usr/local/ [root@SQL1 local]# ln -sv mariadb-5.5.36-linux-x86_64/ mysql [root@SQL1 local]# groupadd -g 350 mysql \\mysql的UID和GID要与NFS服务器上的mysql的UID和GID相同 [root@SQL1 local]# useradd -u 350 -g 350 mysql [root@SQL1 local]# cd mysql/ [root@SQL1 mysql]# mkdir /mydata [root@SQL1 mysql]# mount -t nfs 172.16.2.13:/mydata/ /mydata/ [root@SQL1 mysql]# chown -R root.mysql ./ [root@SQL1 mysql]# scripts/mysql_install_db --user=mysql --datadir=/mydata/data/ [root@SQL1 mysql]# mkdir /etc/mysql [root@SQL1 mysql]# cp support-files/my-large.cnf /etc/mysql/my.cnf [root@SQL1 mysql]# vim /etc/mysql/my.cnf datadir=/mydata/data innodb_file_per_table = on skip_name_resolve = on [root@SQL1 mysql]# cp support-files/mysql.server /etc/init.d/mysqld [root@SQL1 mysql]# chkconfig --add mysqld [root@SQL1 mysql]# service mysqld start Starting MySQL..... SUCCESS! [root@SQL1 ~]# mysql mysql> CREATE DATABASE hamysql; mysql> USE hamysql; mysql> CREATE TABLE hatest(id INT); mysql> GRANT ALL ON hamysql.* TO "hamysql"@"172.16.2.%" IDENTIFIED BY "hamysql"; mysql> FLUSH PRIVILEGES; [root@SQL1 ~]# service mysqld stop Shutting down MySQL. SUCCESS!
[root@SQL2 ~]# tar xf mariadb-5.5.36-linux-x86_64.tar.gz -C /usr/local [root@SQL2 ~]# groupadd -g 350 mysql [root@SQL2 ~]# useradd -u 350 -g 350 mysql [root@SQL2 ~]# mkdir /mydata [root@SQL2 ~]# mount -t nfs 172.16.2.13:/mydata/ /mydata/ [root@SQL2 ~]# cd /usr/local/ [root@SQL2 local]# ln -sv mariadb-5.5.36-linux-x86_64/ mysql [root@SQL2 local]# cd mysql/ [root@SQL2 mysql]# mkdir /etc/mysql [root@SQL2 mysql]# cp support-files/my-large.cnf /etc/mysql/my.cnf [root@SQL2 mysql]# vim /etc/mysql/my.cnf datadir = /mydata/data innodb_file_per_table = on skip_name_resolve = on [root@SQL2 mysql]# cp support-files/mysql.server /etc/init.d/mysqld [root@SQL2 mysql]# chkconfig --add mysqld [root@SQL2 mysql]# chown -R root.mysql ./ [root@SQL2 mysql]# service mysqld start Starting MySQL..... SUCCESS! [root@SQL2 mysql]# mysql -h 172.16.2.14 -u hamysql -p mysql> SHOW DATABASES; +--------------------+ | Database | +--------------------+ | information_schema| | hamysql | | test | +--------------------+ [root@SQL2 mysql]# service mysqld stop [root@SQL2 mysql]# umount /mydata/
安装crmsh(配置yum源:http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/)
[root@SQL1 ~]# vim /etc/yum.repos.d/crmsh.repo [suse_crmsh] name=crmsh baseurl=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/ enabled=1 gpgcheck=0 [root@SQL1 ~]# yum -y install crmsh
修改全局属性:
[root@SQL1 ~]# crm crm(live)# configure crm(live)configure# property stonith-enabled=false crm(live)configure# verify crm(live)configure# commit crm(live)configure# property no-quorum-policy=ignore crm(live)configure# verify crm(live)configure# commit crm(live)configure# property default-resource-stickiness=200 crm(live)configure# verify crm(live)configure# commit
配置VIP资源:
crm(live)configure# primitive hamysql_vip ocf:heartbeat:IPaddr params ip="172.16.2.10" nic="eth0" cidr_netmask="24" broadcast="172.16.2.255" op monitor timeout=20s interval=10s crm(live)configure# verify crm(live)configure# commit
配置nfs资源:
crm(live)configure# primitive hamysql_fs ocf:heartbeat:Filesystem params device="172.16.2.13:/mydata" directory="/mydata" fstype="nfs" op monitor timeout=40s interval=20s op start timeout=60s op stop timeout=60s crm(live)configure# verify crm(live)configure# commit
配置mysql资源:
crm(live)configure# primitive hamysql_mysql lsb:mysqld op monitor timeout=15s interval=15s crm(live)configure# verify crm(live)configure# commit
定义order限制:
crm(live)configure# order hamysql_fs_after_hamysql_vip inf: hamysql_vip hamysql_fs crm(live)configure# verify crm(live)configure# commit crm(live)configure# order hamysql_mysql_after_hamysql_fs inf: hamysql_fs hamysql_mysql crm(live)configure# verify crm(live)configure# commit
定义组资源:
crm(live)configure# group hasql hamysql_vip hamysql_fs hamysql_mysql INFO: resource references in order:hamysql_fs_after_hamysql_vip updated INFO: resource references in order:hamysql_fs_after_hamysql_vip updated INFO: resource references in order:hamysql_mysql_after_hamysql_fs updated INFO: resource references in order:hamysql_mysql_after_hamysql_fs updated crm(live)configure# verify crm(live)configure# commit
查看mysql运行状态:
crm(live)# status Last updated: Tue Jun 30 14:17:47 2015 Last change: Tue Jun 30 14:17:42 2015 Stack: classic openais (with plugin) Current DC: SQL1.linux.com - partition with quorum Version: 1.1.11-97629de 2 Nodes configured, 2 expected votes 3 Resources configured Online: [ SQL1.linux.com SQL2.linux.com ] Resource Group: hasql hamysql_vip (ocf::heartbeat:IPaddr): Started SQL1.linux.com hamysql_fs (ocf::heartbeat:Filesystem): Started SQL1.linux.com hamysql_mysql (lsb:mysqld): Started SQL1.linux.com
测试:
[root@SQL2 ~]# mysql -h 172.16.2.10 -uhamysql -p mysql> SHOW DATABASES; +--------------------+ | Database | +--------------------+ | information_schema| | hamysql | | test | +--------------------+
由于字数限制,好了就写到这里吧…..
原创文章,作者:马行空,如若转载,请注明出处:http://www.178linux.com/5802