# Configure the hostname and /etc/hosts, using node01 as the example
# node02 needs the same configuration
[root@node01 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.10.10.5  node01
10.10.10.6  node02

# Disable the firewall and SELinux
# node02 needs the same configuration
[root@node01 ~]# systemctl stop firewalld
[root@node01 ~]# systemctl disable firewalld
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
Removed symlink /etc/systemd/system/basic.target.wants/firewalld.service.
[root@node01 ~]# setenforce 0
[root@node01 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# Configure the yum repository for pacemaker (the packages come from the CentOS base repo)
# node02 needs the same configuration
[root@node01 ~]# cat /etc/yum.repos.d/pacemaker.repo
[pacemaker]
name=pacemaker
baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/
enabled=1
gpgcheck=0

# Install the pacemaker packages
# node02 needs the same configuration
[root@node01 ~]# yum install pacemaker pcs resource-agents -y

# Set up SSH key authentication between the nodes
# node02 needs the same configuration
[root@node01 ~]# ssh-keygen -t rsa -P ''
[root@node01 ~]# ssh-copy-id node02
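# (Optional check, not in the original walkthrough) Confirm passwordless SSH works; this should print node02's hostname without prompting for a password
[root@node01 ~]# ssh node02 hostname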
# Set a password for the hacluster user (pacemaker uses the hacluster account, which is created automatically when the packages are installed)
# node02 needs the same configuration
[root@node01 ~]# passwd hacluster
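# (Suggested addition, an assumption beyond the original text) Use the same hacluster password on both nodes; for scripted setups, the passwd shipped with CentOS supports a non-interactive mode ('your-password-here' is a placeholder)
[root@node01 ~]# echo 'your-password-here' | passwd --stdin hacluster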
# Start the pcsd service
# node02 needs the same configuration
[root@node01 ~]# systemctl restart pcsd
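# (Suggested addition, not in the original steps) Enable pcsd at boot so the nodes can still be managed after a reboot
[root@node01 ~]# systemctl enable pcsd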
# Authenticate the cluster nodes
# Run on node01 only; the operation is synced to node02 automatically
[root@node01 ~]# pcs cluster auth node01 node02
Username: hacluster
Password:
node02: Authorized
node01: Authorized

# Create a cluster named mycluster and add node01 and node02 as its nodes
# Run on node01 only; the configuration is pushed to node02 automatically
[root@node01 ~]# pcs cluster setup --force --name mycluster node01 node02
Destroying cluster on nodes: node01, node02...
node01: Stopping Cluster (pacemaker)...
node02: Stopping Cluster (pacemaker)...
node01: Successfully destroyed cluster
node02: Successfully destroyed cluster
Sending cluster config files to the nodes...
node01: Succeeded
node02: Succeeded
Synchronizing pcsd certificates on nodes node01, node02...
node02: Success
node01: Success
Restarting pcsd on the nodes in order to reload the certificates...
node02: Success
node01: Success

[root@node01 ~]# pcs cluster start --all
node01: Starting Cluster...
node02: Starting Cluster...

# Check the cluster status
[root@node01 ~]# pcs status
Cluster name: mycluster
WARNING: no stonith devices and stonith-enabled is not false
Stack: corosync
Current DC: node02 (version 1.1.15-11.el7_3.5-e174ec8) - partition with quorum
Last updated: Mon Sep 11 22:54:14 2017
Last change: Mon Sep 11 22:53:39 2017 by hacluster via crmd on node02

2 nodes and 0 resources configured

Online: [ node01 node02 ]

No resources

Daemon Status:
  corosync: active/disabled
  pacemaker: active/disabled
  pcsd: active/disabled
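# (Suggested addition, not in the original steps) The Daemon Status above shows active/disabled, i.e. the cluster services will not start on boot; enable them on both nodes if that is wanted
[root@node01 ~]# pcs cluster enable --all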
# Check the corosync status
[root@node01 ~]# pcs status corosync
Membership information
----------------------
    Nodeid      Votes Name
         1          1 node01 (local)
         2          1 node02

# Verify the cluster configuration
[root@node01 ~]# crm_verify -L -V
   error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
   error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
   error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
Errors found during check: config not valid

# The check fails because no STONITH (fencing) device is configured
# Disable STONITH to clear the errors (acceptable for a test setup; clusters with shared data need STONITH to ensure data integrity)
[root@node01 ~]# pcs property set stonith-enabled=false
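# (Optional check, not in the original walkthrough) Re-running the verification should now print nothing, meaning the configuration is valid
[root@node01 ~]# crm_verify -L -V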
# Configure the VIP (virtual IP)
# Run on node01 only; the configuration is synced to node02 automatically
[root@node01 ~]# pcs resource create ClusterIP ocf:heartbeat:IPaddr2 nic=ens34 ip=10.10.10.8 cidr_netmask=32 op monitor interval=30s
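# (Optional check, not in the original walkthrough) Confirm the VIP came up; 10.10.10.8 should appear on the ens34 interface of whichever node is running ClusterIP
[root@node01 ~]# pcs status resources
[root@node01 ~]# ip addr show ens34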
# Install the httpd service
# node02 needs the same configuration
[root@node01 ~]# yum -y install httpd
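# (Suggested addition, an assumption beyond the original text) httpd must not be started or enabled through systemd, because pacemaker's apache resource agent will start and stop it itself; a fresh install is disabled by default, but it does no harm to make sure
[root@node01 ~]# systemctl disable httpd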
# Create the Apache test page
# node02 needs the same configuration (on node02, change "node 1" to "node 2" in the page)
[root@node01 ~]# vi /var/www/html/index.html
<html>
 <body>welcome to node 1</body>
</html>

# Configure the Apache status URL
# To monitor the health of the Apache instance and recover it when it fails, the resource agent used by pacemaker relies on the server-status URL
# node02 needs the same configuration
[root@node01 ~]# vi /etc/httpd/conf/httpd.conf
<Location /server-status>
    SetHandler server-status
    Order deny,allow
    Deny from all
    Allow from 127.0.0.1
</Location>

# Add Apache to the cluster as a resource
# Run on node01 only; the configuration is synced to node02 automatically
[root@node01 ~]# pcs resource create Web ocf:heartbeat:apache configfile=/etc/httpd/conf/httpd.conf statusurl="http://localhost/server-status" op monitor interval=1min
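# (Optional check, not in the original walkthrough) See which node the Web resource started on, and test the status URL locally on that node (the config above only allows 127.0.0.1)
[root@node01 ~]# pcs status resources
[root@node01 ~]# curl http://localhost/server-status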
# Set the default resource operation timeout (this applies cluster-wide, not only to Apache)
# Run on node01 only; the configuration is synced to node02 automatically
[root@node01 ~]# pcs resource op defaults timeout=240s
# Colocate the VIP and Apache so they always run on the same node
# Run on node01 only; the configuration is synced to node02 automatically
[root@node01 ~]# pcs constraint colocation add Web ClusterIP INFINITY
# Set the start order: ClusterIP first, then Web
# Run on node01 only; the configuration is synced to node02 automatically
[root@node01 ~]# pcs constraint order ClusterIP then Web
Adding ClusterIP Web (kind: Mandatory) (Options: first-action=start then-action=start)
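# (Optional check, not in the original walkthrough) List the constraints to confirm the colocation and ordering rules were recorded
[root@node01 ~]# pcs constraint show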
# Check the cluster status
[root@node01 ~]# pcs status
Cluster name: mycluster
Stack: corosync
Current DC: node01 (version 1.1.15-11.el7_3.5-e174ec8) - partition with quorum
Last updated: Tue Sep 12 16:06:59 2017
Last change: Tue Sep 12 16:06:49 2017 by root via cibadmin on node01

2 nodes and 2 resources configured

Online: [ node01 node02 ]

Full list of resources:

 ClusterIP	(ocf::heartbeat:IPaddr2):	Started node01
 Web	(ocf::heartbeat:apache):	Started node01

Daemon Status:
  corosync: active/disabled
  pacemaker: active/disabled
  pcsd: active/disabled

# Both resources are now running on node01.
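# (Optional check, not in the original walkthrough) Before testing failover, confirm the site answers on the VIP; this should return the test page from node01
[root@node01 ~]# curl http://10.10.10.8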
# Take node01 down, then check the cluster status from node02
[root@node02 ~]# pcs status
Cluster name: mycluster
Stack: corosync
Current DC: node02 (version 1.1.15-11.el7_3.5-e174ec8) - partition with quorum
Last updated: Tue Sep 12 17:02:24 2017
Last change: Tue Sep 12 17:01:57 2017 by root via cibadmin on node01

2 nodes and 2 resources configured

Online: [ node02 ]
OFFLINE: [ node01 ]

Full list of resources:

 ClusterIP	(ocf::heartbeat:IPaddr2):	Started node02
 Web	(ocf::heartbeat:apache):	Started node02

Daemon Status:
  corosync: active/disabled
  pacemaker: active/disabled
  pcsd: active/disabled
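# (Suggested addition, an assumption beyond the original text) Instead of powering a node off, failover can also be tested gracefully; with the pcs 0.9 series shipped in CentOS 7, standby moves the resources off node01 and unstandby makes it eligible to host them again
[root@node01 ~]# pcs cluster standby node01
[root@node01 ~]# pcs cluster unstandby node01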