For easier testing, disable the firewall or flush the firewall rules on all machines.
corosync v2 + pacemaker
RHEL 7.0: node1: 192.168.10.201, hostname: node1
RHEL 7.3: node2: 192.168.10.202, hostname: node2
RHEL 7.3: node3: 192.168.10.203, hostname: node3
RHEL 7.3: node4: 192.168.10.204, hostname: node4
Add the following entries to /etc/hosts on node1, node2, node3, and node4:
192.168.10.201 node1
192.168.10.202 node2
192.168.10.203 node3
192.168.10.204 node4
· Set up passwordless SSH login from node4 to node1, node2, node3, and node4.
· Synchronize the time on node1, node2, node3, and node4, using ntpdate or a hardware clock; all four machines must keep the same time (full setup omitted; a minimal sketch follows below).
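A minimal sketch of both prerequisites, run from node4; the NTP server address below is only a placeholder, substitute a reachable time source:
[root@node4 ~]# ssh-keygen -t rsa          # accept the defaults, empty passphrase
[root@node4 ~]# for n in node1 node2 node3 node4; do ssh-copy-id root@$n; done
# ntpdate 192.168.10.1                     # run on every node; placeholder NTP server address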
1. Install corosync and pacemaker on node2, node3, and node4:
yum install corosync pacemaker -y
2. Configure corosync
[root@node4 ~]# cd /etc/corosync/
[root@node4 corosync]# cp corosync.conf.example corosync.conf
[root@node4 corosync]# vim corosync.conf
totem {
    version: 2
    crypto_cipher: aes128
    crypto_hash: sha1
    secauth: on
    interface {
        ringnumber: 0
        bindnetaddr: 192.168.10.0
        mcastaddr: 239.185.1.31
        mcastport: 5405
        ttl: 1
    }
}
nodelist {    # node definitions
    node {
        ring0_addr: 192.168.10.204
        nodeid: 1
    }
    node {
        ring0_addr: 192.168.10.203
        nodeid: 2
    }
    node {
        ring0_addr: 192.168.10.202
        nodeid: 3
    }
}
logging {
    fileline: off
    to_stderr: no
    to_logfile: yes
    logfile: /var/log/cluster/corosync.log
    to_syslog: no
    debug: off
    timestamp: on
    logger_subsys {
        subsys: QUORUM
        debug: off
    }
}
quorum {    # use the corosync votequorum system as the quorum provider
    provider: corosync_votequorum
}
3. Generate the authentication key in /etc/corosync on node4
[root@node4 corosync]# corosync-keygen
Copy the key and the configuration file to node2 and node3:
[root@node4 corosync]# scp -p authkey corosync.conf root@192.168.10.202:/etc/corosync/
[root@node4 corosync]# scp -p authkey corosync.conf root@192.168.10.203:/etc/corosync/
4. Configure pacemaker on node2, node3, and node4
# vim /etc/sysconfig/pacemaker
PCMK_logfile=/var/log/pacemaker.log
5. Install crmsh (on node2 only)
Download the following packages:
crmsh-3.0.0-6.2.noarch.rpm python-parallax-1.0.0a1-7.1.noarch.rpm
crmsh-scripts-3.0.0-6.2.noarch.rpm
python-pssh-2.3.1-7.2.noarch.rpm
pssh-2.3.1-7.2.noarch.rpm
Download location:
http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/noarch/
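One way to install the downloaded packages on node2, assuming they were all saved to a single directory (the directory name here is just a placeholder); yum resolves the dependencies between them:
[root@node2 ~]# cd /root/crmsh-rpms
[root@node2 crmsh-rpms]# yum install -y ./*.rpm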
Building the HA web service:
vip: 192.168.10.92, ocf:heartbeat:IPaddr
httpd: systemd
nfs shared storage: ocf:heartbeat:Filesystem
node2, node3, node4: run the httpd service; node1: provides the NFS share
Start the corosync and pacemaker services on node2, node3, and node4 (a sketch follows below).
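A minimal sketch of this step, run on each of node2, node3, and node4:
# systemctl start corosync
# systemctl start pacemaker
# corosync-cfgtool -s          # the ring status should report "no faults"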
I. Service installation and setup
(1) node1 setup: the shared directory is /nfs/htdocs
1. Install NFS
[root@node1 ~]# yum install nfs-utils -y
2. Create the shared directory
[root@node1 ~]# mkdir /nfs/htdocs -pv
mkdir: created directory ‘/nfs’
mkdir: created directory ‘/nfs/htdocs’
3. Create a test page
[root@node1 ~]# echo '<h1><font color="red">Test Page on NFS Server…</font></h1>' > /nfs/htdocs/index.html
4. Configure the exports file
[root@node1 ~]# vim /etc/exports
/nfs/htdocs *(rw)
5. Start the NFS service
[root@node1 ~]# systemctl start rpcbind
[root@node1 ~]# systemctl start nfs-server
[root@node1 ~]# systemctl enable rpcbind
[root@node1 ~]# systemctl enable nfs-server
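Optionally verify the export on node1 before moving on (both commands ship with nfs-utils):
[root@node1 ~]# exportfs -v
[root@node1 ~]# showmount -e localhost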
(2) Install the httpd service on node2, node3, and node4
1. Install httpd on node2, node3, and node4:
# yum install httpd -y
2. Configure SELinux on node2, node3, and node4 so httpd can serve NFS content:
# setsebool -P httpd_use_nfs 1
3. Enable httpd to start at boot on node2, node3, and node4:
# systemctl enable httpd
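Before handing the mount over to the cluster, it can be worth mounting the share by hand on one of the web nodes to confirm NFS access works. Note that mounting NFS requires the client tools from nfs-utils, which the steps above do not install explicitly; a sketch:
# yum install nfs-utils -y                        # NFS client tools; also needed later by the Filesystem resource agent
# mount -t nfs 192.168.10.201:/nfs/htdocs /mnt
# cat /mnt/index.html                             # should print the test page created on node1
# umount /mnt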
II. Cluster configuration
crmsh was installed on node2 earlier, so the cluster is configured from node2.
[root@node2 ~]# crm configure
crm(live)configure# property stonith-enabled=false
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure#
1. Configure the vip and web server resources
crm(live)configure# primitive webip ocf:heartbeat:IPaddr2 params ip="192.168.10.92" op monitor interval=30s timeout=20s
crm(live)configure# primitive webserver systemd:httpd op monitor interval=20s timeout=20s
crm(live)configure# verify
2. Configure the NFS storage resource
crm(live)configure# primitive webstore ocf:heartbeat:Filesystem params device="192.168.10.201:/nfs/htdocs" directory="/var/www/html" fstype="nfs" op start timeout=60s op stop timeout=60s op monitor interval=20s timeout=40s
3. Set constraints: webserver, webstore, and webip must always run on the same node
crm(live)configure# colocation webserver_with_webstore_and_webip inf: webserver ( webip webstore )
Set the start order: start the IP first, then the storage, then the web service:
crm(live)configure# order webstore_after_webip Mandatory: webip webstore
crm(live)configure# order webserver_after_webstore Mandatory: webstore webserver
After everything above is configured, commit the changes:
crm(live)configure# commit
Check the configuration:
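One way to review what has been defined so far, from inside the configure shell:
crm(live)configure# show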
Reminder: SELinux must be set on node2, node3, and node4:
# setsebool -P httpd_use_nfs 1
Check the status:
crm(live)configure# cd
crm(live)# status
OK, the configuration is correct.
Open 192.168.10.92 in a browser; the test page is served. OK.
Take a node offline: crm node standby
Bring a node back online: crm node online
location constraint: a placement constraint that expresses a resource's preference for a particular node.
For example, to make the web service prefer node3:
crm(live)configure# location webservice_pref_node3 webip 100: node3
crm(live)configure# verify
crm(live)configure# commit
The resources that previously ran on node2 are now on node3.
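If the preference should later be dropped, the constraint can be removed again from the configure shell (a sketch, using the constraint id defined above):
crm(live)configure# delete webservice_pref_node3
crm(live)configure# verify
crm(live)configure# commit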
Cluster build 2 (using pcs)
1. Stop the corosync and pacemaker services on node2, node3, and node4, then install pcs and start the pcsd service (sketched below): yum install pcs -y
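A per-node sketch of this step, run on each of node2, node3, and node4:
# systemctl stop pacemaker corosync
# yum install pcs -y
# systemctl enable pcsd
# systemctl start pcsd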
2. Configure the ansible hosts file on node1:
[ha]
192.168.10.204
192.168.10.202
192.168.10.203
3. Set the password for the hacluster authentication user:
[root@node1 ~]# ansible ha -m shell -a 'echo 123456 | passwd --stdin hacluster'
4. Test authentication from node4:
[root@node4 ~]# pcs cluster auth node2 node3 node4 -u hacluster
Authentication succeeded.
5. Build the cluster
[root@node4 ~]# pcs cluster setup --name mycluster node2 node3 node4 --force
Destroying cluster on nodes: node2, node3, node4…
node2: Stopping Cluster (pacemaker)…
node3: Stopping Cluster (pacemaker)…
node4: Stopping Cluster (pacemaker)…
node3: Successfully destroyed cluster
node2: Successfully destroyed cluster
node4: Successfully destroyed cluster
Sending 'pacemaker_remote authkey' to 'node2', 'node3', 'node4'
node4: successful distribution of the file 'pacemaker_remote authkey'
node2: successful distribution of the file 'pacemaker_remote authkey'
node3: successful distribution of the file 'pacemaker_remote authkey'
Sending cluster config files to the nodes…
node2: Succeeded
node3: Succeeded
node4: Succeeded
Synchronizing pcsd certificates on nodes node2, node3, node4…
node3: Success
node2: Success
node4: Success
Restarting pcsd on the nodes in order to reload the certificates…
node4: Success
node3: Success
node2: Success
6. Start the cluster
[root@node4 ~]# pcs cluster start --all
[root@node4 ~]# corosync-cfgtool -s
Printing ring status.
Local node ID 3
RING ID 0
id = 192.168.10.204
status = ring 0 active with no faults
[root@node4 ~]# corosync-cmapctl | grep members
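Membership and quorum can also be inspected with corosync-quorumtool, for example:
[root@node4 ~]# corosync-quorumtool -s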
7. Define the webip, webstore, and webserver resources
[root@node4 ~]# pcs resource create webip ocf:heartbeat:IPaddr ip="192.168.10.93" op monitor interval=20s timeout=10s
[root@node4 ~]# pcs resource create webstore ocf:heartbeat:Filesystem device="192.168.10.201:/nfs/htdocs" directory="/var/www/html" fstype=nfs op start timeout=60s op stop timeout=60s op monitor interval=20s timeout=40s
[root@node4 ~]# pcs resource create webserver systemd:httpd op monitor interval=30s timeout=20s
8. Group the resources in start order: webip -> webstore -> webserver
[root@node4 ~]# pcs resource group add webservice webip webstore webserver
Check the cluster status:
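For example, from node4:
[root@node4 ~]# pcs status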
Open 192.168.10.93 in a browser. OK, the build succeeded!
Take a node offline: [root@node4 ~]# pcs cluster standby node2
Bring a node back online: [root@node4 ~]# pcs cluster unstandby node2
Resource location constraint (make the webservice group prefer node4):
[root@node4 ~]# pcs constraint location add webservice_pref_node4 webservice node4 100
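The configured constraints and the resulting resource placement can then be reviewed with:
[root@node4 ~]# pcs constraint
[root@node4 ~]# pcs status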